// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/msm-bus.h>

#include "kgsl_device.h"
#include "kgsl_pwrscale.h"
#include "kgsl_trace.h"
#include "kgsl_trace_power.h"

#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3
#define KGSL_PWRFLAGS_NAP_OFF  5

#define UPDATE_BUSY_VAL		1000000

/* Number of jiffies for a full thermal cycle */
#define TH_HZ			(HZ/5)

#define KGSL_MAX_BUSLEVELS	20

#define DEFAULT_BUS_P		25

/* Order deeply matters here. New entries go on the end */
static const char * const clocks[] = {
	"src_clk",
	"core_clk",
	"iface_clk",
	"mem_clk",
	"mem_iface_clk",
	"alt_mem_iface_clk",
	"rbbmtimer_clk",
	"gtcu_clk",
	"gtbu_clk",
	"gtcu_iface_clk",
	"alwayson_clk",
	"isense_clk",
	"rbcpr_clk",
	"iref_clk",
	"gmu_clk",
	"ahb_clk",
	"smmu_vote",
	"apb_pclk",
};

static unsigned long ib_votes[KGSL_MAX_BUSLEVELS];
static int last_vote_buslevel;
static int max_vote_buslevel;
static unsigned long last_ab;

static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
	int requested_state);
static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
	unsigned int state);
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
	unsigned int state);
static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
	const char *name);
static void _gpu_clk_prepare_enable(struct kgsl_device *device,
	struct clk *clk, const char *name);
static void _bimc_clk_prepare_enable(struct kgsl_device *device,
	struct clk *clk, const char *name);

/**
 * _record_pwrevent() - Record the history of the new event
 * @device: Pointer to the kgsl_device struct
 * @t: Timestamp
 * @event: Event type
 *
 * Finish recording the duration of the previous event.  Then update the
 * index, record the start of the new event, and the relevant data.
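 * The history buffer is a ring: the index wraps at history->size, so the
 * oldest entry is overwritten once the buffer is full.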
*/ static void _record_pwrevent(struct kgsl_device *device, ktime_t t, int event) { struct kgsl_pwrscale *psc = &device->pwrscale; struct kgsl_pwr_history *history = &psc->history[event]; int i = history->index; if (history->events == NULL) return; history->events[i].duration = ktime_us_delta(t, history->events[i].start); i = (i + 1) % history->size; history->index = i; history->events[i].start = t; switch (event) { case KGSL_PWREVENT_STATE: history->events[i].data = device->state; break; case KGSL_PWREVENT_GPU_FREQ: history->events[i].data = device->pwrctrl.active_pwrlevel; break; case KGSL_PWREVENT_BUS_FREQ: history->events[i].data = last_vote_buslevel; break; default: break; } } #if IS_ENABLED(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) #include /** * kgsl_get_bw() - Return latest msm bus IB vote */ static void kgsl_get_bw(unsigned long *ib, unsigned long *ab, void *data) { struct kgsl_device *device = (struct kgsl_device *)data; if (gmu_core_scales_bandwidth(device)) *ib = 0; else *ib = ib_votes[last_vote_buslevel]; *ab = last_ab; } #endif /** * _ab_buslevel_update() - Return latest msm bus AB vote * @pwr: Pointer to the kgsl_pwrctrl struct * @ab: Pointer to be updated with the calculated AB vote */ static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr, unsigned long *ab) { unsigned long ib = ib_votes[last_vote_buslevel]; unsigned long max_bw = ib_votes[max_vote_buslevel]; if (!ab) return; if (ib == 0) *ab = 0; else if ((!pwr->bus_percent_ab) && (!pwr->bus_ab_mbytes)) *ab = DEFAULT_BUS_P * ib / 100; else if (pwr->bus_width) *ab = pwr->bus_ab_mbytes; else *ab = (pwr->bus_percent_ab * max_bw) / 100; } /** * _adjust_pwrlevel() - Given a requested power level do bounds checking on the * constraints and return the nearest possible level * @device: Pointer to the kgsl_device struct * @level: Requested level * @pwrc: Pointer to the power constraint to be applied * * Apply thermal and max/min limits first. Then force the level with a * constraint if one exists. 
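 * When POPP is in effect the ceiling is additionally clamped so that the
 * returned level is never faster than the current active level.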
*/ static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level, struct kgsl_pwr_constraint *pwrc, int popp) { unsigned int max_pwrlevel = max_t(unsigned int, pwr->thermal_pwrlevel, pwr->max_pwrlevel); unsigned int min_pwrlevel = min_t(unsigned int, pwr->thermal_pwrlevel_floor, pwr->min_pwrlevel); /* Ensure that max/min pwrlevels are within thermal max/min limits */ max_pwrlevel = min_t(unsigned int, max_pwrlevel, pwr->thermal_pwrlevel_floor); min_pwrlevel = max_t(unsigned int, min_pwrlevel, pwr->thermal_pwrlevel); switch (pwrc->type) { case KGSL_CONSTRAINT_PWRLEVEL: { switch (pwrc->sub_type) { case KGSL_CONSTRAINT_PWR_MAX: return max_pwrlevel; case KGSL_CONSTRAINT_PWR_MIN: return min_pwrlevel; default: break; } } break; } if (popp && (max_pwrlevel < pwr->active_pwrlevel)) max_pwrlevel = pwr->active_pwrlevel; if (level < max_pwrlevel) return max_pwrlevel; if (level > min_pwrlevel) return min_pwrlevel; return level; } #if IS_ENABLED(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) static void kgsl_pwrctrl_vbif_update(void) { /* ask a governor to vote on behalf of us */ devfreq_vbif_update_bw(); } #else static void kgsl_pwrctrl_vbif_update(void) { } #endif /** * kgsl_bus_scale_request() - set GPU BW vote * @device: Pointer to the kgsl_device struct * @buslevel: index of bw vector[] table */ static int kgsl_bus_scale_request(struct kgsl_device *device, unsigned int buslevel) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int ret = 0; /* GMU scales BW */ if (gmu_core_scales_bandwidth(device)) ret = gmu_core_dcvs_set(device, INVALID_DCVS_IDX, buslevel); else if (pwr->pcl) /* Linux bus driver scales BW */ ret = msm_bus_scale_client_update_request(pwr->pcl, buslevel); if (ret) dev_err(device->dev, "GPU BW scaling failure: %d\n", ret); return ret; } /** * kgsl_clk_set_rate() - set GPU clock rate * @device: Pointer to the kgsl_device struct * @pwrlevel: power level in pwrlevels[] table */ int kgsl_clk_set_rate(struct kgsl_device *device, unsigned int pwrlevel) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct kgsl_pwrlevel *pl = &pwr->pwrlevels[pwrlevel]; int ret = 0; /* GMU scales GPU freq */ if (gmu_core_gpmu_isenabled(device)) ret = gmu_core_dcvs_set(device, pwrlevel, INVALID_DCVS_IDX); else /* Linux clock driver scales GPU freq */ ret = kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0], pl->gpu_freq, clocks[0]); if (ret) dev_err(device->dev, "GPU clk freq set failure: %d\n", ret); return ret; } /** * kgsl_pwrctrl_buslevel_update() - Recalculate the bus vote and send it * @device: Pointer to the kgsl_device struct * @on: true for setting and active bus vote, false to turn off the vote */ void kgsl_pwrctrl_buslevel_update(struct kgsl_device *device, bool on) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int cur = pwr->pwrlevels[pwr->active_pwrlevel].bus_freq; int buslevel = 0; unsigned long ab; /* the bus should be ON to update the active frequency */ if (on && !(test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))) return; /* * If the bus should remain on calculate our request and submit it, * otherwise request bus level 0, off. 
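 * A non-zero request is clamped between 1 and the bus_max value of the
 * top power level.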
*/ if (on) { buslevel = min_t(int, pwr->pwrlevels[0].bus_max, cur + pwr->bus_mod); buslevel = max_t(int, buslevel, 1); } else { /* If the bus is being turned off, reset to default level */ pwr->bus_mod = 0; pwr->bus_percent_ab = 0; pwr->bus_ab_mbytes = 0; } trace_kgsl_buslevel(device, pwr->active_pwrlevel, buslevel); last_vote_buslevel = buslevel; /* buslevel is the IB vote, update the AB */ _ab_buslevel_update(pwr, &ab); last_ab = ab; kgsl_bus_scale_request(device, buslevel); kgsl_pwrctrl_vbif_update(); } EXPORT_SYMBOL(kgsl_pwrctrl_buslevel_update); #if IS_ENABLED(CONFIG_QCOM_CX_IPEAK) static int kgsl_pwr_cx_ipeak_freq_limit(void *ptr, unsigned int freq) { struct kgsl_pwr_limit *cx_ipeak_pwr_limit = ptr; if (IS_ERR_OR_NULL(cx_ipeak_pwr_limit)) return -EINVAL; /* CX-ipeak safe interrupt to remove freq limit */ if (freq == 0) { kgsl_pwr_limits_set_default(cx_ipeak_pwr_limit); return 0; } return kgsl_pwr_limits_set_freq(cx_ipeak_pwr_limit, freq); } static int kgsl_pwrctrl_cx_ipeak_vote(struct kgsl_device *device, u64 old_freq, u64 new_freq) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i, ret; for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) { struct gpu_cx_ipeak_client *ipeak_client = &pwr->gpu_ipeak_client[i]; /* * Set CX Ipeak vote for GPU if it tries to cross * threshold frequency. */ if (old_freq < ipeak_client->freq && new_freq >= ipeak_client->freq) { ret = cx_ipeak_update(ipeak_client->client, true); /* * Hardware damage is possible at peak current * if mitigation not done to limit peak power. */ if (ret) { dev_err(device->dev, "ipeak voting failed for client%d: %d\n", i, ret); return ret; } } } return 0; } static void kgsl_pwrctrl_cx_ipeak_unvote(struct kgsl_device *device, u64 old_freq, u64 new_freq) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i, ret; for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) { struct gpu_cx_ipeak_client *ipeak_client = &pwr->gpu_ipeak_client[i]; /* * Reset CX Ipeak vote for GPU if it goes below * threshold frequency. */ if (old_freq >= ipeak_client->freq && new_freq < ipeak_client->freq) { ret = cx_ipeak_update(ipeak_client->client, false); /* Failed to withdraw the voting from ipeak driver */ if (ret) dev_err(device->dev, "Failed to withdraw ipeak vote for client%d: %d\n", i, ret); } } } static int kgsl_pwrctrl_cx_ipeak_init(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct device_node *node, *child; struct gpu_cx_ipeak_client *cx_ipeak_client; int i = 0, ret; node = of_get_child_by_name(device->pdev->dev.of_node, "qcom,gpu-cx-ipeak"); if (node == NULL) return 0; for_each_child_of_node(node, child) { if (i >= ARRAY_SIZE(pwr->gpu_ipeak_client)) { dev_err(device->dev, "dt: too many CX ipeak clients defined\n", i); ret = -EINVAL; of_node_put(child); goto error; } cx_ipeak_client = &pwr->gpu_ipeak_client[i]; if (!of_property_read_u32(child, "qcom,gpu-cx-ipeak-freq", &cx_ipeak_client->freq)) { cx_ipeak_client->client = cx_ipeak_register(child, "qcom,gpu-cx-ipeak"); if (IS_ERR_OR_NULL(cx_ipeak_client->client)) { ret = IS_ERR(cx_ipeak_client->client) ? 
PTR_ERR(cx_ipeak_client->client) : -EINVAL; dev_err(device->dev, "Failed to register client%d with CX Ipeak %d\n", i, ret); } } else { ret = -EINVAL; dev_err(device->dev, "Failed to get GPU-CX-Ipeak client%d frequency\n", i); } if (ret) { of_node_put(child); goto error; } ++i; } /* cx_ipeak limits for GPU freq throttling */ pwr->cx_ipeak_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0); if (IS_ERR_OR_NULL(pwr->cx_ipeak_pwr_limit)) { dev_err(device->dev, "Failed to get cx_ipeak power limit\n"); ret = -EINVAL; goto error; } cx_ipeak_client = &pwr->gpu_ipeak_client[0]; if (!IS_ERR_OR_NULL(cx_ipeak_client->client)) { ret = cx_ipeak_victim_register(cx_ipeak_client->client, kgsl_pwr_cx_ipeak_freq_limit, pwr->cx_ipeak_pwr_limit); if (ret) { kgsl_pwr_limits_del(pwr->cx_ipeak_pwr_limit); if (ret != -ENOENT) { dev_err(device->dev, "Failed to register GPU-CX-Ipeak victim\n"); goto error; } } } of_node_put(node); return 0; error: for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) { if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[i].client)) { cx_ipeak_unregister(pwr->gpu_ipeak_client[i].client); pwr->gpu_ipeak_client[i].client = NULL; } } of_node_put(node); return ret; } #else static int kgsl_pwrctrl_cx_ipeak_vote(struct kgsl_device *device, u64 old_freq, u64 new_freq) { return 0; } static void kgsl_pwrctrl_cx_ipeak_unvote(struct kgsl_device *device, u64 old_freq, u64 new_freq) { } static int kgsl_pwrctrl_cx_ipeak_init(struct kgsl_device *device) { return 0; } #endif /** * kgsl_pwrctrl_pwrlevel_change_settings() - Program h/w during powerlevel * transitions * @device: Pointer to the kgsl_device struct * @post: flag to check if the call is before/after the clk_rate change * @wake_up: flag to check if device is active or waking up */ static void kgsl_pwrctrl_pwrlevel_change_settings(struct kgsl_device *device, bool post) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; unsigned int old = pwr->previous_pwrlevel; unsigned int new = pwr->active_pwrlevel; if (device->state != KGSL_STATE_ACTIVE) return; if (old == new) return; if (!device->ftbl->pwrlevel_change_settings) return; device->ftbl->pwrlevel_change_settings(device, old, new, post); } /** * kgsl_pwrctrl_set_thermal_cycle() - set the thermal cycle if required * @device: Pointer to the kgsl_device struct * @new_level: the level to transition to */ void kgsl_pwrctrl_set_thermal_cycle(struct kgsl_device *device, unsigned int new_level) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; if ((new_level != pwr->thermal_pwrlevel) || !pwr->sysfs_pwr_limit) return; if (pwr->thermal_pwrlevel == pwr->sysfs_pwr_limit->level) { /* Thermal cycle for sysfs pwr limit, start cycling*/ if (pwr->thermal_cycle == CYCLE_ENABLE) { pwr->thermal_cycle = CYCLE_ACTIVE; mod_timer(&pwr->thermal_timer, jiffies + (TH_HZ - pwr->thermal_timeout)); pwr->thermal_highlow = 1; } } else { /* Non sysfs pwr limit, stop thermal cycle if active*/ if (pwr->thermal_cycle == CYCLE_ACTIVE) { pwr->thermal_cycle = CYCLE_ENABLE; del_timer_sync(&pwr->thermal_timer); } } } /** * kgsl_pwrctrl_adjust_pwrlevel() - Adjust the power level if * required by thermal, max/min, constraints, etc * @device: Pointer to the kgsl_device struct * @new_level: Requested powerlevel, an index into the pwrlevel array */ unsigned int kgsl_pwrctrl_adjust_pwrlevel(struct kgsl_device *device, unsigned int new_level) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; unsigned int old_level = pwr->active_pwrlevel; /* If a pwr constraint is expired, remove it */ if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) && (time_after(jiffies, 
pwr->constraint.expires))) { /* Trace the constraint being un-set by the driver */ trace_kgsl_constraint(device, pwr->constraint.type, old_level, 0); /*Invalidate the constraint set */ pwr->constraint.expires = 0; pwr->constraint.type = KGSL_CONSTRAINT_NONE; } /* * Adjust the power level if required by thermal, max/min, * constraints, etc */ return _adjust_pwrlevel(pwr, new_level, &pwr->constraint, device->pwrscale.popp_level); } /** * kgsl_pwrctrl_pwrlevel_change() - Validate and change power levels * @device: Pointer to the kgsl_device struct * @new_level: Requested powerlevel, an index into the pwrlevel array * * Check that any power level constraints are still valid. Update the * requested level according to any thermal, max/min, or power constraints. * If a new GPU level is going to be set, update the bus to that level's * default value. Do not change the bus if a constraint keeps the new * level at the current level. Set the new GPU frequency. */ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, unsigned int new_level) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct kgsl_pwrlevel *pwrlevel; unsigned int old_level = pwr->active_pwrlevel; new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level); /* * If thermal cycling is required and the new level hits the * thermal limit, kick off the cycling. */ kgsl_pwrctrl_set_thermal_cycle(device, new_level); if (new_level == old_level && !test_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags)) return; /* * If new freq is equal or above CX Ipeak threshold set the vote * first before switching to new freq to allow CX Ipeak driver * to trigger required mitigation, if necessary for safe switch * to new GPU freq. */ if (kgsl_pwrctrl_cx_ipeak_vote(device, pwr->pwrlevels[old_level].gpu_freq, pwr->pwrlevels[new_level].gpu_freq)) return; kgsl_pwrscale_update_stats(device); /* * Set the active and previous powerlevel first in case the clocks are * off - if we don't do this then the pwrlevel change won't take effect * when the clocks come back */ pwr->active_pwrlevel = new_level; pwr->previous_pwrlevel = old_level; /* * If the bus is running faster than its default level and the GPU * frequency is moving down keep the DDR at a relatively high level. */ if (pwr->bus_mod < 0 || new_level < old_level) { pwr->bus_mod = 0; pwr->bus_percent_ab = 0; } /* * Update the bus before the GPU clock to prevent underrun during * frequency increases. */ kgsl_pwrctrl_buslevel_update(device, true); pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel]; /* Change register settings if any BEFORE pwrlevel change*/ kgsl_pwrctrl_pwrlevel_change_settings(device, 0); kgsl_clk_set_rate(device, pwr->active_pwrlevel); _isense_clk_set_rate(pwr, pwr->active_pwrlevel); trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq, pwr->previous_pwrlevel, pwr->pwrlevels[old_level].gpu_freq); trace_gpu_frequency(pwrlevel->gpu_freq/1000, 0); /* * Some targets do not support the bandwidth requirement of * GPU at TURBO, for such targets we need to set GPU-BIMC * interface clocks to TURBO directly whenever GPU runs at * TURBO. The TURBO frequency of gfx-bimc need to be defined * in target device tree. 
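 * The interface clock is enabled only while the GPU sits at power level 0
 * and is released again as soon as the GPU leaves that level.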
*/ if (pwr->gpu_bimc_int_clk) { if (pwr->active_pwrlevel == 0 && !pwr->gpu_bimc_interface_enabled) { _bimc_clk_prepare_enable(device, pwr->gpu_bimc_int_clk, "bimc_gpu_clk"); kgsl_pwrctrl_clk_set_rate(pwr->gpu_bimc_int_clk, pwr->gpu_bimc_int_clk_freq, "bimc_gpu_clk"); pwr->gpu_bimc_interface_enabled = true; } else if (pwr->previous_pwrlevel == 0 && pwr->gpu_bimc_interface_enabled) { clk_disable_unprepare(pwr->gpu_bimc_int_clk); pwr->gpu_bimc_interface_enabled = false; } } /* Change register settings if any AFTER pwrlevel change*/ kgsl_pwrctrl_pwrlevel_change_settings(device, 1); /* Timestamp the frequency change */ device->pwrscale.freq_change_time = ktime_to_ms(ktime_get()); /* * If new freq is below CX Ipeak threshold remove the GPU vote * here after switching to new freq. Its done after switching * to ensure that we are below CX Ipeak threshold before * removing the GPU vote. */ kgsl_pwrctrl_cx_ipeak_unvote(device, pwr->pwrlevels[old_level].gpu_freq, pwr->pwrlevels[new_level].gpu_freq); } EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change); /** * kgsl_pwrctrl_set_constraint() - Validate and change enforced constraint * @device: Pointer to the kgsl_device struct * @pwrc: Pointer to requested constraint * @id: Context id which owns the constraint * * Accept the new constraint if no previous constraint existed or if the * new constraint is faster than the previous one. If the new and previous * constraints are equal, update the timestamp and ownership to make sure * the constraint expires at the correct time. */ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device, struct kgsl_pwr_constraint *pwrc, uint32_t id) { unsigned int constraint; struct kgsl_pwr_constraint *pwrc_old; if (device == NULL || pwrc == NULL) return; constraint = _adjust_pwrlevel(&device->pwrctrl, device->pwrctrl.active_pwrlevel, pwrc, 0); pwrc_old = &device->pwrctrl.constraint; /* * If a constraint is already set, set a new constraint only * if it is faster. If the requested constraint is the same * as the current one, update ownership and timestamp. */ if ((pwrc_old->type == KGSL_CONSTRAINT_NONE) || (constraint < pwrc_old->hint.pwrlevel.level)) { pwrc_old->type = pwrc->type; pwrc_old->sub_type = pwrc->sub_type; pwrc_old->hint.pwrlevel.level = constraint; pwrc_old->owner_id = id; pwrc_old->expires = jiffies + device->pwrctrl.interval_timeout; kgsl_pwrctrl_pwrlevel_change(device, constraint); /* Trace the constraint being set by the driver */ trace_kgsl_constraint(device, pwrc_old->type, constraint, 1); } else if ((pwrc_old->type == pwrc->type) && (pwrc_old->hint.pwrlevel.level == constraint)) { pwrc_old->owner_id = id; pwrc_old->expires = jiffies + device->pwrctrl.interval_timeout; } } EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint); /** * kgsl_pwrctrl_update_l2pc() - Update existing qos request * @device: Pointer to the kgsl_device struct * @timeout_us: the effective duration of qos request in usecs. * * Updates an existing qos request to avoid L2PC on the * CPUs (which are selected through dtsi) on which GPU * thread is running. This would help for performance. 
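 * The request is armed with pm_qos_update_request_timeout(), so it expires
 * on its own after @timeout_us.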
*/ void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device, unsigned long timeout_us) { int cpu; if (device->pwrctrl.l2pc_cpus_mask == 0) return; cpu = get_cpu(); put_cpu(); if ((1 << cpu) & device->pwrctrl.l2pc_cpus_mask) { pm_qos_update_request_timeout( &device->pwrctrl.l2pc_cpus_qos, device->pwrctrl.pm_qos_cpu_mask_latency, timeout_us); } } EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc); static ssize_t thermal_pwrlevel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; int ret; unsigned int level = 0; ret = kgsl_sysfs_store(buf, &level); if (ret) return ret; if (level > pwr->num_pwrlevels - 2) level = pwr->num_pwrlevels - 2; if (kgsl_pwr_limits_set_freq(pwr->sysfs_pwr_limit, pwr->pwrlevels[level].gpu_freq)) { dev_err(device->dev, "Failed to set sysfs thermal limit via limits fw\n"); mutex_lock(&device->mutex); pwr->thermal_pwrlevel = level; /* Update the current level using the new limit */ kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel); mutex_unlock(&device->mutex); } return count; } static ssize_t thermal_pwrlevel_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel); } static ssize_t max_pwrlevel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; int ret; unsigned int level = 0; ret = kgsl_sysfs_store(buf, &level); if (ret) return ret; mutex_lock(&device->mutex); /* You can't set a maximum power level lower than the minimum */ if (level > pwr->min_pwrlevel) level = pwr->min_pwrlevel; pwr->max_pwrlevel = level; /* Update the current level using the new limit */ kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel); mutex_unlock(&device->mutex); return count; } static ssize_t max_pwrlevel_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; return scnprintf(buf, PAGE_SIZE, "%u\n", pwr->max_pwrlevel); } static void kgsl_pwrctrl_min_pwrlevel_set(struct kgsl_device *device, int level) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; mutex_lock(&device->mutex); if (level > pwr->num_pwrlevels - 2) level = pwr->num_pwrlevels - 2; /* You can't set a minimum power level lower than the maximum */ if (level < pwr->max_pwrlevel) level = pwr->max_pwrlevel; pwr->min_pwrlevel = level; /* Update the current level using the new limit */ kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel); mutex_unlock(&device->mutex); } static ssize_t min_pwrlevel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); int ret; unsigned int level = 0; ret = kgsl_sysfs_store(buf, &level); if (ret) return ret; kgsl_pwrctrl_min_pwrlevel_set(device, level); return count; } static ssize_t min_pwrlevel_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; return scnprintf(buf, PAGE_SIZE, "%u\n", pwr->min_pwrlevel); } static ssize_t num_pwrlevels_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct 
kgsl_pwrctrl *pwr = &device->pwrctrl; return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1); } /* Given a GPU clock value, return the lowest matching powerlevel */ static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock) { int i; for (i = pwr->num_pwrlevels - 2; i >= 0; i--) { if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000) return i; } return -ERANGE; } static void kgsl_pwrctrl_max_clock_set(struct kgsl_device *device, int val) { struct kgsl_pwrctrl *pwr; int level; pwr = &device->pwrctrl; mutex_lock(&device->mutex); level = _get_nearest_pwrlevel(pwr, val); /* If the requested power level is not supported by hw, try cycling */ if (level < 0) { unsigned int hfreq, diff, udiff, i; if ((val < pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq) || (val > pwr->pwrlevels[0].gpu_freq)) goto err; /* Find the neighboring frequencies */ for (i = 0; i < pwr->num_pwrlevels - 1; i++) { if ((pwr->pwrlevels[i].gpu_freq > val) && (pwr->pwrlevels[i + 1].gpu_freq < val)) { level = i; break; } } if (i == pwr->num_pwrlevels - 1) goto err; hfreq = pwr->pwrlevels[i].gpu_freq; diff = hfreq - pwr->pwrlevels[i + 1].gpu_freq; udiff = hfreq - val; pwr->thermal_timeout = (udiff * TH_HZ) / diff; pwr->thermal_cycle = CYCLE_ENABLE; } else { pwr->thermal_cycle = CYCLE_DISABLE; del_timer_sync(&pwr->thermal_timer); } mutex_unlock(&device->mutex); if (pwr->sysfs_pwr_limit) kgsl_pwr_limits_set_freq(pwr->sysfs_pwr_limit, pwr->pwrlevels[level].gpu_freq); return; err: mutex_unlock(&device->mutex); } static ssize_t max_gpuclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); unsigned int val = 0; int ret; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; kgsl_pwrctrl_max_clock_set(device, val); return count; } static unsigned int kgsl_pwrctrl_max_clock_get(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr; unsigned int freq; if (device == NULL) return 0; pwr = &device->pwrctrl; freq = pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq; /* Calculate the effective frequency if we're cycling */ if (pwr->thermal_cycle) { unsigned int hfreq = freq; unsigned int lfreq = pwr->pwrlevels[pwr->thermal_pwrlevel + 1].gpu_freq; freq = pwr->thermal_timeout * (lfreq / TH_HZ) + (TH_HZ - pwr->thermal_timeout) * (hfreq / TH_HZ); } return freq; } static ssize_t max_gpuclk_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", kgsl_pwrctrl_max_clock_get(device)); } static ssize_t gpuclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; unsigned int val = 0; int ret, level; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; mutex_lock(&device->mutex); level = _get_nearest_pwrlevel(pwr, val); if (level >= 0) kgsl_pwrctrl_pwrlevel_change(device, (unsigned int) level); mutex_unlock(&device->mutex); return count; } static ssize_t gpuclk_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%ld\n", kgsl_pwrctrl_active_freq(&device->pwrctrl)); } static ssize_t __timer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, enum kgsl_pwrctrl_timer_type timer) { unsigned int val = 0; struct kgsl_device *device = dev_get_drvdata(dev); int ret; ret = 
kgsl_sysfs_store(buf, &val); if (ret) return ret; /* * We don't quite accept a maximum of 0xFFFFFFFF due to internal jiffy * math, so make sure the value falls within the largest offset we can * deal with */ if (val > jiffies_to_usecs(MAX_JIFFY_OFFSET)) return -EINVAL; mutex_lock(&device->mutex); /* Let the timeout be requested in ms, but convert to jiffies. */ if (timer == KGSL_PWR_IDLE_TIMER) device->pwrctrl.interval_timeout = msecs_to_jiffies(val); mutex_unlock(&device->mutex); return count; } static ssize_t idle_timer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return __timer_store(dev, attr, buf, count, KGSL_PWR_IDLE_TIMER); } static ssize_t idle_timer_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); /* Show the idle_timeout converted to msec */ return scnprintf(buf, PAGE_SIZE, "%u\n", jiffies_to_msecs(device->pwrctrl.interval_timeout)); } static ssize_t pmqos_active_latency_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int val = 0; struct kgsl_device *device = dev_get_drvdata(dev); int ret; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; mutex_lock(&device->mutex); device->pwrctrl.pm_qos_active_latency = val; mutex_unlock(&device->mutex); return count; } static ssize_t pmqos_active_latency_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.pm_qos_active_latency); } static ssize_t gpubusy_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_clk_stats *stats = &device->pwrctrl.clk_stats; ret = scnprintf(buf, PAGE_SIZE, "%7d %7d\n", stats->busy_old, stats->total_old); if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) { stats->busy_old = 0; stats->total_old = 0; } return ret; } static ssize_t gpu_available_frequencies_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; int index, num_chars = 0; for (index = 0; index < pwr->num_pwrlevels - 1; index++) { num_chars += scnprintf(buf + num_chars, PAGE_SIZE - num_chars - 1, "%d ", pwr->pwrlevels[index].gpu_freq); /* One space for trailing null and another for the newline */ if (num_chars >= PAGE_SIZE - 2) break; } buf[num_chars++] = '\n'; return num_chars; } static ssize_t gpu_clock_stats_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; int index, num_chars = 0; mutex_lock(&device->mutex); kgsl_pwrscale_update_stats(device); mutex_unlock(&device->mutex); for (index = 0; index < pwr->num_pwrlevels - 1; index++) num_chars += scnprintf(buf + num_chars, PAGE_SIZE - num_chars, "%llu ", pwr->clock_times[index]); if (num_chars < PAGE_SIZE) buf[num_chars++] = '\n'; return num_chars; } static ssize_t reset_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter); } static void __force_on(struct kgsl_device *device, int flag, int on) { if (on) { switch (flag) { case KGSL_PWRFLAGS_CLK_ON: /* make sure pwrrail is ON before enabling clocks */ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON); 
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); break; case KGSL_PWRFLAGS_AXI_ON: kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON); break; case KGSL_PWRFLAGS_POWER_ON: kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON); break; } set_bit(flag, &device->pwrctrl.ctrl_flags); } else { clear_bit(flag, &device->pwrctrl.ctrl_flags); } } static ssize_t __force_on_show(struct device *dev, struct device_attribute *attr, char *buf, int flag) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", test_bit(flag, &device->pwrctrl.ctrl_flags)); } static ssize_t __force_on_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, int flag) { unsigned int val = 0; struct kgsl_device *device = dev_get_drvdata(dev); int ret; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; mutex_lock(&device->mutex); __force_on(device, flag, val); mutex_unlock(&device->mutex); return count; } static ssize_t force_clk_on_show(struct device *dev, struct device_attribute *attr, char *buf) { return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_CLK_ON); } static ssize_t force_clk_on_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_CLK_ON); } static ssize_t force_bus_on_show(struct device *dev, struct device_attribute *attr, char *buf) { return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_AXI_ON); } static ssize_t force_bus_on_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_AXI_ON); } static ssize_t force_rail_on_show(struct device *dev, struct device_attribute *attr, char *buf) { return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_POWER_ON); } static ssize_t force_rail_on_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON); } static ssize_t force_no_nap_show(struct device *dev, struct device_attribute *attr, char *buf) { return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_NAP_OFF); } static ssize_t force_no_nap_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_NAP_OFF); } static ssize_t bus_split_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.bus_control); } static ssize_t bus_split_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int val = 0; struct kgsl_device *device = dev_get_drvdata(dev); int ret; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; mutex_lock(&device->mutex); device->pwrctrl.bus_control = val ? 
true : false; mutex_unlock(&device->mutex); return count; } static ssize_t default_pwrlevel_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.default_pwrlevel); } static ssize_t default_pwrlevel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct kgsl_pwrscale *pwrscale = &device->pwrscale; int ret; unsigned int level = 0; ret = kgsl_sysfs_store(buf, &level); if (ret) return ret; if (level > pwr->num_pwrlevels - 2) goto done; mutex_lock(&device->mutex); pwr->default_pwrlevel = level; pwrscale->gpu_profile.profile.initial_freq = pwr->pwrlevels[level].gpu_freq; mutex_unlock(&device->mutex); done: return count; } static ssize_t popp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int val = 0; struct kgsl_device *device = dev_get_drvdata(dev); int ret; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; mutex_lock(&device->mutex); if (val) set_bit(POPP_ON, &device->pwrscale.popp_state); else clear_bit(POPP_ON, &device->pwrscale.popp_state); mutex_unlock(&device->mutex); return count; } static ssize_t popp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", test_bit(POPP_ON, &device->pwrscale.popp_state)); } static ssize_t gpu_model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); char model_str[32] = {0}; device->ftbl->gpu_model(device, model_str, sizeof(model_str)); return scnprintf(buf, PAGE_SIZE, "%s\n", model_str); } static ssize_t gpu_busy_percentage_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_clk_stats *stats = &device->pwrctrl.clk_stats; unsigned int busy_percent = 0; if (stats->total_old != 0) busy_percent = (stats->busy_old * 100) / stats->total_old; ret = scnprintf(buf, PAGE_SIZE, "%d %%\n", busy_percent); /* Reset the stats if GPU is OFF */ if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) { stats->busy_old = 0; stats->total_old = 0; } return ret; } static ssize_t min_clock_mhz_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->pwrlevels[pwr->min_pwrlevel].gpu_freq / 1000000); } static ssize_t min_clock_mhz_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); int level, ret; unsigned int freq; struct kgsl_pwrctrl *pwr = &device->pwrctrl; ret = kgsl_sysfs_store(buf, &freq); if (ret) return ret; freq *= 1000000; level = _get_nearest_pwrlevel(pwr, freq); if (level >= 0) kgsl_pwrctrl_min_pwrlevel_set(device, level); return count; } static ssize_t max_clock_mhz_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); unsigned int freq = kgsl_pwrctrl_max_clock_get(device); return scnprintf(buf, PAGE_SIZE, "%d\n", freq / 1000000); } static ssize_t max_clock_mhz_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = 
dev_get_drvdata(dev); unsigned int val = 0; int ret; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; val *= 1000000; kgsl_pwrctrl_max_clock_set(device, val); return count; } static ssize_t clock_mhz_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%ld\n", kgsl_pwrctrl_active_freq(&device->pwrctrl) / 1000000); } static ssize_t freq_table_mhz_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr = &device->pwrctrl; int index, num_chars = 0; for (index = 0; index < pwr->num_pwrlevels - 1; index++) { num_chars += scnprintf(buf + num_chars, PAGE_SIZE - num_chars - 1, "%d ", pwr->pwrlevels[index].gpu_freq / 1000000); /* One space for trailing null and another for the newline */ if (num_chars >= PAGE_SIZE - 2) break; } buf[num_chars++] = '\n'; return num_chars; } static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct device *_dev; struct thermal_zone_device *thermal_dev; int temperature = INT_MIN, max_temp = INT_MIN; const char *name; struct property *prop; _dev = &device->pdev->dev; of_property_for_each_string(_dev->of_node, "qcom,tzone-names", prop, name) { thermal_dev = thermal_zone_get_zone_by_name(name); if (IS_ERR(thermal_dev)) continue; if (thermal_zone_get_temp(thermal_dev, &temperature)) continue; max_temp = max(temperature, max_temp); } return scnprintf(buf, PAGE_SIZE, "%d\n", max_temp); } static ssize_t pwrscale_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kgsl_device *device = dev_get_drvdata(dev); int ret; unsigned int enable = 0; ret = kgsl_sysfs_store(buf, &enable); if (ret) return ret; mutex_lock(&device->mutex); if (enable) kgsl_pwrscale_enable(device); else kgsl_pwrscale_disable(device, false); mutex_unlock(&device->mutex); return count; } static ssize_t pwrscale_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrscale *psc = &device->pwrscale; return scnprintf(buf, PAGE_SIZE, "%u\n", psc->enabled); } static DEVICE_ATTR_RO(temp); static DEVICE_ATTR_RW(gpuclk); static DEVICE_ATTR_RW(max_gpuclk); static DEVICE_ATTR_RW(idle_timer); static DEVICE_ATTR_RO(gpubusy); static DEVICE_ATTR_RO(gpu_available_frequencies); static DEVICE_ATTR_RO(gpu_clock_stats); static DEVICE_ATTR_RW(max_pwrlevel); static DEVICE_ATTR_RW(min_pwrlevel); static DEVICE_ATTR_RW(thermal_pwrlevel); static DEVICE_ATTR_RO(num_pwrlevels); static DEVICE_ATTR_RW(pmqos_active_latency); static DEVICE_ATTR_RO(reset_count); static DEVICE_ATTR_RW(force_clk_on); static DEVICE_ATTR_RW(force_bus_on); static DEVICE_ATTR_RW(force_rail_on); static DEVICE_ATTR_RW(bus_split); static DEVICE_ATTR_RW(default_pwrlevel); static DEVICE_ATTR_RW(popp); static DEVICE_ATTR_RW(force_no_nap); static DEVICE_ATTR_RO(gpu_model); static DEVICE_ATTR_RO(gpu_busy_percentage); static DEVICE_ATTR_RW(min_clock_mhz); static DEVICE_ATTR_RW(max_clock_mhz); static DEVICE_ATTR_RO(clock_mhz); static DEVICE_ATTR_RO(freq_table_mhz); static DEVICE_ATTR_RW(pwrscale); static const struct attribute *pwrctrl_attr_list[] = { &dev_attr_gpuclk.attr, &dev_attr_max_gpuclk.attr, &dev_attr_idle_timer.attr, &dev_attr_gpubusy.attr, &dev_attr_gpu_available_frequencies.attr, &dev_attr_gpu_clock_stats.attr, &dev_attr_max_pwrlevel.attr, 
&dev_attr_min_pwrlevel.attr, &dev_attr_thermal_pwrlevel.attr, &dev_attr_num_pwrlevels.attr, &dev_attr_pmqos_active_latency.attr, &dev_attr_reset_count.attr, &dev_attr_force_clk_on.attr, &dev_attr_force_bus_on.attr, &dev_attr_force_rail_on.attr, &dev_attr_force_no_nap.attr, &dev_attr_bus_split.attr, &dev_attr_default_pwrlevel.attr, &dev_attr_popp.attr, &dev_attr_gpu_model.attr, &dev_attr_gpu_busy_percentage.attr, &dev_attr_min_clock_mhz.attr, &dev_attr_max_clock_mhz.attr, &dev_attr_clock_mhz.attr, &dev_attr_freq_table_mhz.attr, &dev_attr_temp.attr, &dev_attr_pwrscale.attr, NULL, }; struct sysfs_link { const char *src; const char *dst; }; static struct sysfs_link link_names[] = { { "gpu_model", "gpu_model",}, { "gpu_busy_percentage", "gpu_busy",}, { "min_clock_mhz", "gpu_min_clock",}, { "max_clock_mhz", "gpu_max_clock",}, { "clock_mhz", "gpu_clock",}, { "freq_table_mhz", "gpu_freq_table",}, { "temp", "gpu_tmu",}, }; int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device) { int i, ret; ret = sysfs_create_files(&device->dev->kobj, pwrctrl_attr_list); if (ret) return ret; device->gpu_sysfs_kobj = kobject_create_and_add("gpu", kernel_kobj); if (IS_ERR_OR_NULL(device->gpu_sysfs_kobj)) return (device->gpu_sysfs_kobj == NULL) ? -ENOMEM : PTR_ERR(device->gpu_sysfs_kobj); for (i = 0; i < ARRAY_SIZE(link_names); i++) kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj, &device->dev->kobj, link_names[i].src, link_names[i].dst); return 0; } void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device) { sysfs_remove_files(&device->dev->kobj, pwrctrl_attr_list); } /* * Track the amount of time the gpu is on vs the total system time. * Regularly update the percentage of busy time displayed by sysfs. */ void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy) { struct kgsl_clk_stats *stats = &device->pwrctrl.clk_stats; stats->total += time; stats->busy += busy; if (stats->total < UPDATE_BUSY_VAL) return; /* Update the output regularly and reset the counters. */ stats->total_old = stats->total; stats->busy_old = stats->busy; stats->total = 0; stats->busy = 0; trace_kgsl_gpubusy(device, stats->busy_old, stats->total_old); } EXPORT_SYMBOL(kgsl_pwrctrl_busy_time); static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, int requested_state) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i = 0; if (gmu_core_gpmu_isenabled(device)) return; if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags)) return; if (state == KGSL_PWRFLAGS_OFF) { if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) { trace_kgsl_clk(device, state, kgsl_pwrctrl_active_freq(pwr)); /* Disable gpu-bimc-interface clocks */ if (pwr->gpu_bimc_int_clk && pwr->gpu_bimc_interface_enabled) { clk_disable_unprepare(pwr->gpu_bimc_int_clk); pwr->gpu_bimc_interface_enabled = false; } for (i = KGSL_MAX_CLKS - 1; i > 0; i--) clk_disable(pwr->grp_clks[i]); /* High latency clock maintenance. */ if ((pwr->pwrlevels[0].gpu_freq > 0) && (requested_state != KGSL_STATE_NAP)) { for (i = KGSL_MAX_CLKS - 1; i > 0; i--) clk_unprepare(pwr->grp_clks[i]); kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1); _isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1); } /* Turn off the IOMMU clocks */ kgsl_mmu_disable_clk(&device->mmu); } else if (requested_state == KGSL_STATE_SLUMBER) { /* High latency clock maintenance. 
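 * On the way to slumber all clocks are unprepared and the GPU clock is
 * parked at its lowest defined rate.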
*/ for (i = KGSL_MAX_CLKS - 1; i > 0; i--) clk_unprepare(pwr->grp_clks[i]); if ((pwr->pwrlevels[0].gpu_freq > 0)) { kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1); _isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1); } } } else if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) { trace_kgsl_clk(device, state, kgsl_pwrctrl_active_freq(pwr)); /* High latency clock maintenance. */ if (device->state != KGSL_STATE_NAP) { if (pwr->pwrlevels[0].gpu_freq > 0) { kgsl_clk_set_rate(device, pwr->active_pwrlevel); _isense_clk_set_rate(pwr, pwr->active_pwrlevel); } } for (i = KGSL_MAX_CLKS - 1; i > 0; i--) _gpu_clk_prepare_enable(device, pwr->grp_clks[i], clocks[i]); /* Enable the gpu-bimc-interface clocks */ if (pwr->gpu_bimc_int_clk) { if (pwr->active_pwrlevel == 0 && !pwr->gpu_bimc_interface_enabled) { kgsl_pwrctrl_clk_set_rate( pwr->gpu_bimc_int_clk, pwr->gpu_bimc_int_clk_freq, "bimc_gpu_clk"); _bimc_clk_prepare_enable(device, pwr->gpu_bimc_int_clk, "bimc_gpu_clk"); pwr->gpu_bimc_interface_enabled = true; } } /* Turn on the IOMMU clocks */ kgsl_mmu_enable_clk(&device->mmu); } } } #if IS_ENABLED(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) static void kgsl_pwrctrl_suspend_devbw(struct kgsl_pwrctrl *pwr) { if (pwr->devbw) devfreq_suspend_devbw(pwr->devbw); } static void kgsl_pwrctrl_resume_devbw(struct kgsl_pwrctrl *pwr) { if (pwr->devbw) devfreq_resume_devbw(pwr->devbw); } #else static void kgsl_pwrctrl_suspend_devbw(struct kgsl_pwrctrl *pwr) { } static void kgsl_pwrctrl_resume_devbw(struct kgsl_pwrctrl *pwr) { } #endif static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags)) return; if (state == KGSL_PWRFLAGS_OFF) { if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) { trace_kgsl_bus(device, state); kgsl_pwrctrl_buslevel_update(device, false); kgsl_pwrctrl_suspend_devbw(pwr); } } else if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) { trace_kgsl_bus(device, state); kgsl_pwrctrl_buslevel_update(device, true); kgsl_pwrctrl_resume_devbw(pwr); } } } static int _regulator_enable(struct kgsl_device *device, struct kgsl_regulator *regulator) { int ret; if (IS_ERR_OR_NULL(regulator->reg)) return 0; ret = regulator_enable(regulator->reg); if (ret) dev_err(device->dev, "Failed to enable regulator '%s': %d\n", regulator->name, ret); return ret; } static void _regulator_disable(struct kgsl_regulator *regulator) { if (!IS_ERR_OR_NULL(regulator->reg)) regulator_disable(regulator->reg); } static int _enable_regulators(struct kgsl_device *device, struct kgsl_pwrctrl *pwr) { int i; for (i = 0; i < KGSL_MAX_REGULATORS; i++) { int ret = _regulator_enable(device, &pwr->regulators[i]); if (ret) { for (i = i - 1; i >= 0; i--) _regulator_disable(&pwr->regulators[i]); return ret; } } return 0; } static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int status = 0; if (gmu_core_gpmu_isenabled(device)) return 0; /* * Disabling the regulator means also disabling dependent clocks. * Hence don't disable it if force clock ON is set. 
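 * The same applies when force rail ON is set, so bail out if either bit is
 * set in ctrl_flags.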
*/ if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) || test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags)) return 0; if (state == KGSL_PWRFLAGS_OFF) { if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->power_flags)) { trace_kgsl_rail(device, state); device->ftbl->regulator_disable_poll(device); } } else if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->power_flags)) { status = _enable_regulators(device, pwr); if (status) clear_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->power_flags); else trace_kgsl_rail(device, state); } } return status; } static void kgsl_pwrctrl_irq(struct kgsl_device *device, int state) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON, &pwr->power_flags)) { trace_kgsl_irq(device, state); enable_irq(pwr->interrupt_num); } } else if (state == KGSL_PWRFLAGS_OFF) { if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON, &pwr->power_flags)) { trace_kgsl_irq(device, state); if (in_interrupt()) disable_irq_nosync(pwr->interrupt_num); else disable_irq(pwr->interrupt_num); } } } /** * kgsl_thermal_cycle() - Work function for thermal timer. * @work: The input work * * This function is called for work that is queued by the thermal * timer. It cycles to the alternate thermal frequency. */ static void kgsl_thermal_cycle(struct work_struct *work) { struct kgsl_pwrctrl *pwr = container_of(work, struct kgsl_pwrctrl, thermal_cycle_ws); struct kgsl_device *device = container_of(pwr, struct kgsl_device, pwrctrl); if (device == NULL) return; mutex_lock(&device->mutex); if (pwr->thermal_cycle == CYCLE_ACTIVE) { if (pwr->thermal_highlow) kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel); else kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel + 1); } mutex_unlock(&device->mutex); } static void kgsl_thermal_timer(struct timer_list *t) { struct kgsl_pwrctrl *pwr = from_timer(pwr, t, thermal_timer); struct kgsl_device *device = container_of(pwr, struct kgsl_device, pwrctrl); /* Keep the timer running consistently despite processing time */ if (device->pwrctrl.thermal_highlow) { mod_timer(&device->pwrctrl.thermal_timer, jiffies + device->pwrctrl.thermal_timeout); device->pwrctrl.thermal_highlow = 0; } else { mod_timer(&device->pwrctrl.thermal_timer, jiffies + (TH_HZ - device->pwrctrl.thermal_timeout)); device->pwrctrl.thermal_highlow = 1; } /* Have work run in a non-interrupt context. */ kgsl_schedule_work(&device->pwrctrl.thermal_cycle_ws); } #if IS_ENABLED(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) static void kgsl_pwrctrl_vbif_init(struct kgsl_device *device) { devfreq_vbif_register_callback(kgsl_get_bw, device); } #else static void kgsl_pwrctrl_vbif_init(struct kgsl_device *device) { } #endif static int _get_regulator(struct kgsl_device *device, struct kgsl_regulator *regulator, const char *str) { regulator->reg = devm_regulator_get(&device->pdev->dev, str); if (IS_ERR(regulator->reg)) { int ret = PTR_ERR(regulator->reg); dev_err(&device->pdev->dev, "Couldn't get regulator: %s (%d)\n", str, ret); return ret; } strlcpy(regulator->name, str, sizeof(regulator->name)); return 0; } static int get_legacy_regulators(struct kgsl_device *device) { struct device *dev = &device->pdev->dev; struct kgsl_pwrctrl *pwr = &device->pwrctrl; int ret; ret = _get_regulator(device, &pwr->regulators[0], "vdd"); /* Use vddcx only on targets that have it. 
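 * i.e. only when the device tree provides a vddcx-supply property.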
*/ if (ret == 0 && of_find_property(dev->of_node, "vddcx-supply", NULL)) ret = _get_regulator(device, &pwr->regulators[1], "vddcx"); return ret; } static int get_regulators(struct kgsl_device *device) { struct device *dev = &device->pdev->dev; struct kgsl_pwrctrl *pwr = &device->pwrctrl; int index = 0; const char *name; struct property *prop; if (!of_find_property(dev->of_node, "regulator-names", NULL)) return get_legacy_regulators(device); of_property_for_each_string(dev->of_node, "regulator-names", prop, name) { int ret; if (index == KGSL_MAX_REGULATORS) { dev_err(dev, "Too many regulators defined\n"); return -ENOMEM; } ret = _get_regulator(device, &pwr->regulators[index], name); if (ret) return ret; index++; } return 0; } static int _get_clocks(struct kgsl_device *device) { struct device *dev = &device->pdev->dev; struct kgsl_pwrctrl *pwr = &device->pwrctrl; const char *name; struct property *prop; pwr->isense_clk_indx = 0; of_property_for_each_string(dev->of_node, "clock-names", prop, name) { int i; for (i = 0; i < KGSL_MAX_CLKS; i++) { if (pwr->grp_clks[i] || strcmp(clocks[i], name)) continue; pwr->grp_clks[i] = devm_clk_get(dev, name); if (IS_ERR(pwr->grp_clks[i])) { int ret = PTR_ERR(pwr->grp_clks[i]); dev_err(dev, "Couldn't get clock: %s (%d)\n", name, ret); pwr->grp_clks[i] = NULL; return ret; } if (!strcmp(name, "isense_clk")) pwr->isense_clk_indx = i; break; } } if (pwr->isense_clk_indx && of_property_read_u32(dev->of_node, "qcom,isense-clk-on-level", &pwr->isense_clk_on_level)) { dev_err(dev, "Couldn't get isense clock on level\n"); return -ENXIO; } return 0; } static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level) { int rate; if (!pwr->isense_clk_indx) return -EINVAL; rate = clk_round_rate(pwr->grp_clks[pwr->isense_clk_indx], level > pwr->isense_clk_on_level ? 
KGSL_XO_CLK_FREQ : KGSL_ISENSE_CLK_FREQ); return kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate, clocks[pwr->isense_clk_indx]); } /* * _gpu_clk_prepare_enable - Enable the specified GPU clock * Try once to enable it and then BUG() for debug */ static void _gpu_clk_prepare_enable(struct kgsl_device *device, struct clk *clk, const char *name) { int ret; if (device->state == KGSL_STATE_NAP) { ret = clk_enable(clk); if (ret) goto err; return; } ret = clk_prepare_enable(clk); if (!ret) return; err: /* Failure is fatal so BUG() to facilitate debug */ dev_err(device->dev, "GPU Clock %s enable error:%d\n", name, ret); } /* * _bimc_clk_prepare_enable - Enable the specified GPU clock * Try once to enable it and then BUG() for debug */ static void _bimc_clk_prepare_enable(struct kgsl_device *device, struct clk *clk, const char *name) { int ret = clk_prepare_enable(clk); /* Failure is fatal so BUG() to facilitate debug */ if (ret) dev_err(device->dev, "GPU clock %s enable error:%d\n", name, ret); } static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq, const char *name) { int ret = clk_set_rate(grp_clk, freq); WARN(ret, "%s set freq %d failed:%d\n", name, freq, ret); return ret; } static inline void _close_pcl(struct kgsl_pwrctrl *pwr) { if (pwr->pcl) msm_bus_scale_unregister_client(pwr->pcl); pwr->pcl = 0; } static void _close_gpu_cfg(struct kgsl_pwrctrl *pwr) { if (pwr->gpu_cfg) msm_bus_scale_unregister_client(pwr->gpu_cfg); pwr->gpu_cfg = 0; } static inline void _close_regulators(struct kgsl_pwrctrl *pwr) { int i; for (i = 0; i < KGSL_MAX_REGULATORS; i++) pwr->regulators[i].reg = NULL; } static inline void _close_clks(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i; for (i = 0; i < KGSL_MAX_CLKS; i++) pwr->grp_clks[i] = NULL; if (pwr->gpu_bimc_int_clk) devm_clk_put(&device->pdev->dev, pwr->gpu_bimc_int_clk); } static bool _gpu_freq_supported(struct kgsl_pwrctrl *pwr, unsigned int freq) { int i; for (i = pwr->num_pwrlevels - 2; i >= 0; i--) { if (pwr->pwrlevels[i].gpu_freq == freq) return true; } return false; } void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device, struct device *dev) { struct dev_pm_opp *opp; unsigned long freq = 0; int ret; ret = dev_pm_opp_get_opp_count(dev); /* Return early, If no OPP table or OPP count is zero */ if (ret <= 0) return; while (1) { opp = dev_pm_opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) break; if (!_gpu_freq_supported(&device->pwrctrl, freq)) dev_pm_opp_disable(dev, freq); dev_pm_opp_put(opp); freq++; } } static bool pwrlevel_uses_ib(struct msm_bus_scale_pdata *bus_scale_table, struct msm_bus_vectors *vector, struct kgsl_pwrctrl *pwr, int m) { if (bus_scale_table->usecase[pwr->pwrlevels[m].bus_freq].vectors[0].ib == vector->ib) return true; else return false; } int kgsl_pwrctrl_init(struct kgsl_device *device) { int i, k, m, n = 0, result, freq; struct platform_device *pdev = device->pdev; struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct device_node *gpu_cfg_node; struct msm_bus_scale_pdata *bus_scale_table; struct msm_bus_scale_pdata *gpu_cfg_table; struct device_node *gpubw_dev_node = NULL; struct platform_device *p2dev; bus_scale_table = kgsl_get_bus_scale_table(device); if (bus_scale_table == NULL) return -EINVAL; result = _get_clocks(device); if (result) goto error_cleanup_clks; /* Make sure we have a source clk for freq setting */ if (pwr->grp_clks[0] == NULL) pwr->grp_clks[0] = pwr->grp_clks[1]; if (of_property_read_bool(pdev->dev.of_node, "qcom,no-nap")) 
device->pwrctrl.ctrl_flags |= BIT(KGSL_PWRFLAGS_NAP_OFF); if (pwr->num_pwrlevels == 0) { dev_err(device->dev, "No power levels are defined\n"); result = -EINVAL; goto error_cleanup_clks; } /* Initialize the user and thermal clock constraints */ pwr->max_pwrlevel = 0; pwr->min_pwrlevel = pwr->num_pwrlevels - 2; pwr->thermal_pwrlevel = 0; pwr->thermal_pwrlevel_floor = pwr->min_pwrlevel; pwr->wakeup_maxpwrlevel = 0; for (i = 0; i < pwr->num_pwrlevels; i++) { freq = pwr->pwrlevels[i].gpu_freq; if (freq > 0) freq = clk_round_rate(pwr->grp_clks[0], freq); if (freq >= pwr->pwrlevels[i].gpu_freq) pwr->pwrlevels[i].gpu_freq = freq; } kgsl_pwrctrl_disable_unused_opp(device, &pdev->dev); kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1); freq = clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ); if (freq > 0) kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6], freq, clocks[6]); _isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1); result = get_regulators(device); if (result) goto error_cleanup_regulators; pwr->power_flags = 0; of_property_read_u32(device->pdev->dev.of_node, "qcom,l2pc-cpu-mask", &pwr->l2pc_cpus_mask); pwr->l2pc_update_queue = of_property_read_bool( device->pdev->dev.of_node, "qcom,l2pc-update-queue"); pm_runtime_enable(&pdev->dev); gpu_cfg_node = of_find_node_by_name(device->pdev->dev.of_node, "qcom,cpu-to-gpu-cfg-path"); if (gpu_cfg_node) { gpu_cfg_table = msm_bus_pdata_from_node(device->pdev, gpu_cfg_node); if (gpu_cfg_table) pwr->gpu_cfg = msm_bus_scale_register_client(gpu_cfg_table); if (!pwr->gpu_cfg) { result = -EINVAL; goto error_disable_pm; } } /* Check if gpu bandwidth vote device is defined in dts */ if (pwr->bus_control) /* Check if gpu bandwidth vote device is defined in dts */ gpubw_dev_node = of_parse_phandle(pdev->dev.of_node, "qcom,gpubw-dev", 0); /* * Governor support enables the gpu bus scaling via governor * and hence no need to register for bus scaling client * if gpubw-dev is defined. */ if (gpubw_dev_node) { p2dev = of_find_device_by_node(gpubw_dev_node); if (p2dev) pwr->devbw = &p2dev->dev; } else { /* * Register for gpu bus scaling if governor support * is not enabled and gpu bus voting is to be done * from the driver. */ pwr->pcl = msm_bus_scale_register_client(bus_scale_table); if (pwr->pcl == 0) { result = -EINVAL; goto error_cleanup_gpu_cfg; } } pwr->bus_ib = kzalloc(bus_scale_table->num_usecases * sizeof(*pwr->bus_ib), GFP_KERNEL); if (pwr->bus_ib == NULL) { result = -ENOMEM; goto error_cleanup_pcl; } /* * Pull the BW vote out of the bus table. They will be used to * calculate the ratio between the votes. */ for (i = 0; i < bus_scale_table->num_usecases; i++) { struct msm_bus_paths *usecase = &bus_scale_table->usecase[i]; struct msm_bus_vectors *vector = &usecase->vectors[0]; if (vector->dst == MSM_BUS_SLAVE_EBI_CH0 && vector->ib != 0) { if (i < KGSL_MAX_BUSLEVELS) { /* Convert bytes to Mbytes. 
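 * so the IB votes are tracked in megabytes rather than raw bytes.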
*/ ib_votes[i] = DIV_ROUND_UP_ULL(vector->ib, 1048576) - 1; if (ib_votes[i] > ib_votes[max_vote_buslevel]) max_vote_buslevel = i; } /* check for duplicate values */ for (k = 0; k < n; k++) if (vector->ib == pwr->bus_ib[k]) break; /* if this is a new ib value, save it */ if (k == n) { pwr->bus_ib[k] = vector->ib; n++; /* find which pwrlevels use this ib */ for (m = 0; m < pwr->num_pwrlevels - 1; m++) { if (pwrlevel_uses_ib(bus_scale_table, vector, pwr, m)) pwr->bus_index[m] = k; } } } } INIT_LIST_HEAD(&pwr->limits); spin_lock_init(&pwr->limits_lock); result = kgsl_pwrctrl_cx_ipeak_init(device); if (result) goto error_cleanup_bus_ib; pwr->cooling_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0); if (IS_ERR_OR_NULL(pwr->cooling_pwr_limit)) { dev_err(device->dev, "Failed to add cooling power limit\n"); result = -EINVAL; pwr->cooling_pwr_limit = NULL; goto error_cleanup_bus_ib; } INIT_WORK(&pwr->thermal_cycle_ws, kgsl_thermal_cycle); timer_setup(&pwr->thermal_timer, kgsl_thermal_timer, 0); pwr->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0); kgsl_pwrctrl_vbif_init(device); return result; error_cleanup_bus_ib: kfree(pwr->bus_ib); error_cleanup_pcl: _close_pcl(pwr); error_cleanup_gpu_cfg: _close_gpu_cfg(pwr); error_disable_pm: pm_runtime_disable(&pdev->dev); error_cleanup_regulators: _close_regulators(pwr); error_cleanup_clks: _close_clks(device); return result; } void kgsl_pwrctrl_close(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i; kgsl_pwr_limits_del(pwr->cx_ipeak_pwr_limit); pwr->cx_ipeak_pwr_limit = NULL; if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[0].client)) cx_ipeak_victim_unregister(pwr->gpu_ipeak_client[0].client); for (i = 0; i < ARRAY_SIZE(pwr->gpu_ipeak_client); i++) { if (!IS_ERR_OR_NULL(pwr->gpu_ipeak_client[i].client)) { cx_ipeak_unregister(pwr->gpu_ipeak_client[i].client); pwr->gpu_ipeak_client[i].client = NULL; } } pwr->power_flags = 0; if (!IS_ERR_OR_NULL(pwr->sysfs_pwr_limit)) { list_del(&pwr->sysfs_pwr_limit->node); kfree(pwr->sysfs_pwr_limit); pwr->sysfs_pwr_limit = NULL; } kgsl_pwr_limits_del(pwr->cooling_pwr_limit); pwr->cooling_pwr_limit = NULL; kfree(pwr->bus_ib); _close_pcl(pwr); _close_gpu_cfg(pwr); pm_runtime_disable(&device->pdev->dev); _close_regulators(pwr); _close_clks(device); } /** * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts. * @device: The device * * This function is called for work that is queued by the interrupt * handler or the idle timer. It attempts to transition to a clocks * off state if the active_cnt is 0 and the hardware is idle. 
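 * If the state change is refused with -EBUSY, the requested state is
 * restored and the idle work is rescheduled so the transition can be
 * retried once the GPU is no longer busy.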
*/ void kgsl_idle_check(struct work_struct *work) { struct kgsl_device *device = container_of(work, struct kgsl_device, idle_check_ws); int ret = 0; unsigned int requested_state; mutex_lock(&device->mutex); requested_state = device->requested_state; if ((requested_state != KGSL_STATE_NONE) && (device->state == KGSL_STATE_ACTIVE || device->state == KGSL_STATE_NAP)) { if (!atomic_read(&device->active_cnt)) { spin_lock(&device->submit_lock); if (device->submit_now) { spin_unlock(&device->submit_lock); goto done; } /* Don't allow GPU inline submission in SLUMBER */ if (requested_state == KGSL_STATE_SLUMBER) device->slumber = true; spin_unlock(&device->submit_lock); ret = kgsl_pwrctrl_change_state(device, device->requested_state); if (ret == -EBUSY) { if (requested_state == KGSL_STATE_SLUMBER) { spin_lock(&device->submit_lock); device->slumber = false; spin_unlock(&device->submit_lock); } /* * If the GPU is currently busy, restore * the requested state and reschedule * idle work. */ kgsl_pwrctrl_request_state(device, requested_state); kgsl_schedule_work(&device->idle_check_ws); } } done: if (!ret) kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); if (device->state == KGSL_STATE_ACTIVE) mod_timer(&device->idle_timer, jiffies + device->pwrctrl.interval_timeout); } kgsl_pwrscale_update(device); mutex_unlock(&device->mutex); } EXPORT_SYMBOL(kgsl_idle_check); void kgsl_timer(struct timer_list *t) { struct kgsl_device *device = from_timer(device, t, idle_timer); if (device->requested_state != KGSL_STATE_SUSPEND) { kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER); /* Have work run in a non-interrupt context. */ kgsl_schedule_work(&device->idle_check_ws); } } static bool kgsl_pwrctrl_isenabled(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; return ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) != 0) && (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags) != 0)); } /** * kgsl_pre_hwaccess - Enforce preconditions for touching registers * @device: The device * * This function ensures that the correct lock is held and that the GPU * clock is on immediately before a register is read or written. Note * that this function does not check active_cnt because the registers * must be accessed during device start and stop, when the active_cnt * may legitimately be 0. */ void kgsl_pre_hwaccess(struct kgsl_device *device) { /* In order to touch a register you must hold the device mutex */ WARN_ON(!mutex_is_locked(&device->mutex)); /* * A register access without device power will cause a fatal timeout. * This is not valid for targets with a GMU. 
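 * On those targets the power-flag check below is skipped.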
*/ if (!gmu_core_gpmu_isenabled(device)) WARN_ON(!kgsl_pwrctrl_isenabled(device)); } EXPORT_SYMBOL(kgsl_pre_hwaccess); static int kgsl_pwrctrl_enable(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int level, status; if (pwr->wakeup_maxpwrlevel) { level = pwr->max_pwrlevel; pwr->wakeup_maxpwrlevel = 0; } else if (kgsl_popp_check(device)) { level = pwr->active_pwrlevel; } else { level = pwr->default_pwrlevel; } kgsl_pwrctrl_pwrlevel_change(device, level); if (gmu_core_gpmu_isenabled(device)) { int ret = gmu_core_start(device); if (!ret) kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON); return ret; } /* Order pwrrail/clk sequence based upon platform */ status = kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON); if (status) return status; kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON); return device->ftbl->regulator_enable(device); } static void kgsl_pwrctrl_disable(struct kgsl_device *device) { int status; status = clk_set_rate(device->l3_clk, device->l3_freq[0]); if (!status) device->cur_l3_pwrlevel = 0; else dev_err(device->dev, "Could not clear l3_vote: %d\n", status); if (gmu_core_gpmu_isenabled(device)) { kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF); return gmu_core_stop(device); } /* Order pwrrail/clk sequence based upon platform */ device->ftbl->regulator_disable(device); kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF); kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLUMBER); kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF); } static void kgsl_pwrctrl_clk_set_options(struct kgsl_device *device, bool on) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i; for (i = 0; i < KGSL_MAX_CLKS; i++) { if (pwr->grp_clks[i] == NULL) continue; if (device->ftbl->clk_set_options) device->ftbl->clk_set_options(device, clocks[i], pwr->grp_clks[i], on); } } /** * _init() - Get the GPU ready to start, but don't turn anything on * @device - Pointer to the kgsl_device struct */ static int _init(struct kgsl_device *device) { int status = 0; switch (device->state) { case KGSL_STATE_RESET: if (gmu_core_isenabled(device)) { /* * If we fail a INIT -> AWARE transition, we will * transition back to INIT. However, we must hard reset * the GMU as we go back to INIT. This is done by * forcing a RESET -> INIT transition. */ gmu_core_suspend(device); kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); } break; case KGSL_STATE_NAP: /* Force power on to do the stop */ status = kgsl_pwrctrl_enable(device); case KGSL_STATE_ACTIVE: kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); del_timer_sync(&device->idle_timer); kgsl_pwrscale_midframe_timer_cancel(device); device->ftbl->stop(device); /* fall through */ case KGSL_STATE_AWARE: kgsl_pwrctrl_disable(device); /* fall through */ case KGSL_STATE_SLUMBER: case KGSL_STATE_NONE: kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); } return status; } /** * _wake() - Power up the GPU from a slumber state * @device - Pointer to the kgsl_device struct * * Resume the GPU from a lower power state to ACTIVE. 
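 * Return 0 on success else error code.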
*/ static int _wake(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int status = 0; unsigned int state = device->state; switch (device->state) { case KGSL_STATE_SUSPEND: complete_all(&device->hwaccess_gate); /* Call the GPU specific resume function */ device->ftbl->resume(device); /* fall through */ case KGSL_STATE_SLUMBER: kgsl_pwrctrl_clk_set_options(device, true); status = device->ftbl->start(device, device->pwrctrl.superfast); device->pwrctrl.superfast = false; if (status) { kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); dev_err(device->dev, "start failed %d\n", status); break; } kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON); kgsl_pwrscale_wake(device); kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); /* fall through */ case KGSL_STATE_NAP: /* Turn on the core clocks */ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND) trace_gpu_frequency( pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq/1000, 0); /* * No need to turn on/off irq here as it no longer affects * power collapse */ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE); /* * Change register settings if any after pwrlevel change. * If there was dcvs level change during nap - call * pre and post in the row after clock is enabled. */ kgsl_pwrctrl_pwrlevel_change_settings(device, 0); kgsl_pwrctrl_pwrlevel_change_settings(device, 1); /* All settings for power level transitions are complete*/ pwr->previous_pwrlevel = pwr->active_pwrlevel; mod_timer(&device->idle_timer, jiffies + device->pwrctrl.interval_timeout); break; case KGSL_STATE_AWARE: kgsl_pwrctrl_clk_set_options(device, true); /* Enable state before turning on irq */ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE); kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); mod_timer(&device->idle_timer, jiffies + device->pwrctrl.interval_timeout); break; default: dev_warn(device->dev, "unhandled state %s\n", kgsl_pwrstate_to_str(device->state)); kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); status = -EINVAL; break; } return status; } /* * _aware() - Put device into AWARE * @device: Device pointer * * The GPU should be available for register reads/writes and able * to communicate with the rest of the system. However disable all * paths that allow a switch to an interrupt context (interrupts & * timers). * Return 0 on success else error code */ static int _aware(struct kgsl_device *device) { int status = 0; switch (device->state) { case KGSL_STATE_RESET: if (!gmu_core_gpmu_isenabled(device)) break; kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); status = gmu_core_start(device); break; case KGSL_STATE_INIT: status = kgsl_pwrctrl_enable(device); break; /* The following 3 cases shouldn't occur, but don't panic. */ case KGSL_STATE_NAP: status = _wake(device); case KGSL_STATE_ACTIVE: kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); del_timer_sync(&device->idle_timer); kgsl_pwrscale_midframe_timer_cancel(device); break; case KGSL_STATE_SLUMBER: status = kgsl_pwrctrl_enable(device); break; default: status = -EINVAL; } if (status && gmu_core_isenabled(device)) /* * If a SLUMBER/INIT -> AWARE fails, we transition back to * SLUMBER/INIT state. We must hard reset the GMU while * transitioning back to SLUMBER/INIT. A RESET -> AWARE * transition is different. It happens when dispatcher is * attempting reset/recovery as part of fault handling. If it * fails, we should still transition back to RESET in case * we want to attempt another reset/recovery. 
*/ kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET); else kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE); return status; } static int _nap(struct kgsl_device *device) { switch (device->state) { case KGSL_STATE_ACTIVE: if (!device->ftbl->is_hw_collapsible(device)) { kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); return -EBUSY; } device->ftbl->stop_fault_timer(device); kgsl_pwrscale_midframe_timer_cancel(device); /* * Read HW busy counters before going to NAP state. * The data might be used by power scale governors * independently of the HW activity. For example * the simple-on-demand governor will get the latest * busy_time data even if the gpu isn't active. */ kgsl_pwrscale_update_stats(device); kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP); kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP); /* fallthrough */ case KGSL_STATE_SLUMBER: case KGSL_STATE_RESET: break; case KGSL_STATE_AWARE: dev_warn(device->dev, "transition AWARE -> NAP is not permitted\n"); /* fallthrough */ default: kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); break; } return 0; } static int _slumber(struct kgsl_device *device) { int status = 0; switch (device->state) { case KGSL_STATE_ACTIVE: if (!device->ftbl->is_hw_collapsible(device)) { kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); return -EBUSY; } /* fall through */ case KGSL_STATE_NAP: del_timer_sync(&device->idle_timer); kgsl_pwrscale_midframe_timer_cancel(device); if (device->pwrctrl.thermal_cycle == CYCLE_ACTIVE) { device->pwrctrl.thermal_cycle = CYCLE_ENABLE; del_timer_sync(&device->pwrctrl.thermal_timer); } kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); /* make sure power is on to stop the device*/ status = kgsl_pwrctrl_enable(device); device->ftbl->suspend_context(device); device->ftbl->stop(device); kgsl_pwrctrl_clk_set_options(device, false); kgsl_pwrctrl_disable(device); kgsl_pwrscale_sleep(device); trace_gpu_frequency(0, 0); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, PM_QOS_DEFAULT_VALUE); if (device->pwrctrl.l2pc_cpus_mask) pm_qos_update_request( &device->pwrctrl.l2pc_cpus_qos, PM_QOS_DEFAULT_VALUE); break; case KGSL_STATE_SUSPEND: complete_all(&device->hwaccess_gate); device->ftbl->resume(device); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); break; case KGSL_STATE_AWARE: kgsl_pwrctrl_disable(device); trace_gpu_frequency(0, 0); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); break; case KGSL_STATE_RESET: if (gmu_core_isenabled(device)) { /* Reset the GMU if we failed to boot the GMU */ gmu_core_suspend(device); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); } break; default: kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); break; } return status; } /* * _suspend() - Put device into suspend * @device: Device pointer * * Return 0 on success else error code */ static int _suspend(struct kgsl_device *device) { int ret = 0; if ((device->state == KGSL_STATE_NONE) || (device->state == KGSL_STATE_INIT) || (device->state == KGSL_STATE_SUSPEND)) return ret; /* drain to prevent from more commands being submitted */ device->ftbl->drain(device); /* wait for active count so device can be put in slumber */ ret = kgsl_active_count_wait(device, 0); if (ret) goto err; ret = device->ftbl->idle(device); if (ret) goto err; ret = _slumber(device); if (ret) goto err; kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND); return ret; err: device->ftbl->resume(device); dev_err(device->dev, "device failed to SUSPEND %d\n", ret); return ret; } /* * 
kgsl_pwrctrl_change_state() changes the GPU state to the input * @device: Pointer to a KGSL device * @state: desired KGSL state * * Caller must hold the device mutex. If the requested state change * is valid, execute it. Otherwise return an error code explaining * why the change has not taken place. Also print an error if an * unexpected state change failure occurs. For example, a change to * NAP may be rejected because the GPU is busy, this is not an error. * A change to SUSPEND should go through no matter what, so if it * fails an additional error message will be printed to dmesg. */ int kgsl_pwrctrl_change_state(struct kgsl_device *device, int state) { int status = 0; if (device->state == state) return status; kgsl_pwrctrl_request_state(device, state); /* Work through the legal state transitions */ switch (state) { case KGSL_STATE_INIT: status = _init(device); break; case KGSL_STATE_AWARE: status = _aware(device); break; case KGSL_STATE_ACTIVE: status = _wake(device); break; case KGSL_STATE_NAP: status = _nap(device); break; case KGSL_STATE_SLUMBER: status = _slumber(device); break; case KGSL_STATE_SUSPEND: status = _suspend(device); break; case KGSL_STATE_RESET: kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET); break; default: dev_err(device->dev, "bad state request 0x%x\n", state); kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); status = -EINVAL; break; } /* Record the state timing info */ if (!status) { ktime_t t = ktime_get(); _record_pwrevent(device, t, KGSL_PWREVENT_STATE); } return status; } EXPORT_SYMBOL(kgsl_pwrctrl_change_state); static void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state) { trace_kgsl_pwr_set_state(device, state); device->state = state; device->requested_state = KGSL_STATE_NONE; spin_lock(&device->submit_lock); if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND) device->slumber = true; else device->slumber = false; spin_unlock(&device->submit_lock); } static void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state) { if (state != KGSL_STATE_NONE && state != device->requested_state) trace_kgsl_pwr_request_state(device, state); device->requested_state = state; } const char *kgsl_pwrstate_to_str(unsigned int state) { switch (state) { case KGSL_STATE_NONE: return "NONE"; case KGSL_STATE_INIT: return "INIT"; case KGSL_STATE_AWARE: return "AWARE"; case KGSL_STATE_ACTIVE: return "ACTIVE"; case KGSL_STATE_NAP: return "NAP"; case KGSL_STATE_SUSPEND: return "SUSPEND"; case KGSL_STATE_SLUMBER: return "SLUMBER"; case KGSL_STATE_RESET: return "RESET"; default: break; } return "UNKNOWN"; } EXPORT_SYMBOL(kgsl_pwrstate_to_str); /** * kgsl_active_count_get() - Increase the device active count * @device: Pointer to a KGSL device * * Increase the active count for the KGSL device and turn on * clocks if this is the first reference. Code paths that need * to touch the hardware or wait for the hardware to complete * an operation must hold an active count reference until they * are finished. An error code will be returned if waking the * device fails. The device mutex must be held while *calling * this function. 
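 * Note that the mutex is dropped while waiting for the hwaccess gate,
 * so the device state may change before the wake-up is attempted.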
*/ int kgsl_active_count_get(struct kgsl_device *device) { int ret = 0; if (WARN_ON(!mutex_is_locked(&device->mutex))) return -EINVAL; if ((atomic_read(&device->active_cnt) == 0) && (device->state != KGSL_STATE_ACTIVE)) { mutex_unlock(&device->mutex); wait_for_completion(&device->hwaccess_gate); mutex_lock(&device->mutex); device->pwrctrl.superfast = true; ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE); } if (ret == 0) atomic_inc(&device->active_cnt); trace_kgsl_active_count(device, (unsigned long) __builtin_return_address(0)); return ret; } EXPORT_SYMBOL(kgsl_active_count_get); /** * kgsl_active_count_put() - Decrease the device active count * @device: Pointer to a KGSL device * * Decrease the active count for the KGSL device and turn off * clocks if there are no remaining references. This function will * transition the device to NAP if there are no other pending state * changes. It also completes the suspend gate. The device mutex must * be held while calling this function. */ void kgsl_active_count_put(struct kgsl_device *device) { if (WARN_ON(!mutex_is_locked(&device->mutex))) return; if (WARN(atomic_read(&device->active_cnt) == 0, "Unbalanced get/put calls to KGSL active count\n")) return; if (atomic_dec_and_test(&device->active_cnt)) { bool nap_on = !(device->pwrctrl.ctrl_flags & BIT(KGSL_PWRFLAGS_NAP_OFF)); if (nap_on && device->state == KGSL_STATE_ACTIVE && device->requested_state == KGSL_STATE_NONE) { kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP); kgsl_schedule_work(&device->idle_check_ws); } else if (!nap_on) { kgsl_pwrscale_update_stats(device); kgsl_pwrscale_update(device); } mod_timer(&device->idle_timer, jiffies + device->pwrctrl.interval_timeout); } trace_kgsl_active_count(device, (unsigned long) __builtin_return_address(0)); wake_up(&device->active_cnt_wq); } EXPORT_SYMBOL(kgsl_active_count_put); static int _check_active_count(struct kgsl_device *device, int count) { /* Return 0 if the active count is greater than the desired value */ return atomic_read(&device->active_cnt) > count ? 0 : 1; } /** * kgsl_active_count_wait() - Wait for activity to finish. * @device: Pointer to a KGSL device * @count: Active count value to wait for * * Block until the active_cnt value hits the desired value */ int kgsl_active_count_wait(struct kgsl_device *device, int count) { int result = 0; long wait_jiffies = HZ; if (WARN_ON(!mutex_is_locked(&device->mutex))) return -EINVAL; while (atomic_read(&device->active_cnt) > count) { long ret; mutex_unlock(&device->mutex); ret = wait_event_timeout(device->active_cnt_wq, _check_active_count(device, count), wait_jiffies); mutex_lock(&device->mutex); result = ret == 0 ? 
-ETIMEDOUT : 0; if (!result) wait_jiffies = ret; else break; } return result; } EXPORT_SYMBOL(kgsl_active_count_wait); /** * _update_limits() - update the limits based on the current requests * @limit: Pointer to the limits structure * @reason: Reason for the update * @level: Level if any to be set * * Set the thermal pwrlevel based on the current limits */ static void _update_limits(struct kgsl_pwr_limit *limit, unsigned int reason, unsigned int level) { struct kgsl_device *device = limit->device; struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct kgsl_pwr_limit *temp_limit; unsigned int max_level = 0; spin_lock(&pwr->limits_lock); switch (reason) { case KGSL_PWR_ADD_LIMIT: list_add(&limit->node, &pwr->limits); break; case KGSL_PWR_DEL_LIMIT: list_del(&limit->node); if (list_empty(&pwr->limits)) goto done; break; case KGSL_PWR_SET_LIMIT: limit->level = level; break; default: break; } list_for_each_entry(temp_limit, &pwr->limits, node) { max_level = max_t(unsigned int, max_level, temp_limit->level); } done: spin_unlock(&pwr->limits_lock); mutex_lock(&device->mutex); pwr->thermal_pwrlevel = max_level; kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel); mutex_unlock(&device->mutex); } /** * kgsl_pwr_limits_add() - Add a new pwr limit * @id: Device ID * * Allocate a pwr limit structure for the client, add it to the limits * list and return the pointer to the client */ void *kgsl_pwr_limits_add(u32 id) { struct kgsl_device *device = kgsl_get_device(id); struct kgsl_pwr_limit *limit; if (IS_ERR_OR_NULL(device)) return NULL; limit = kzalloc(sizeof(struct kgsl_pwr_limit), GFP_KERNEL); if (limit == NULL) return ERR_PTR(-ENOMEM); limit->device = device; _update_limits(limit, KGSL_PWR_ADD_LIMIT, 0); return limit; } EXPORT_SYMBOL(kgsl_pwr_limits_add); /** * kgsl_pwr_limits_del() - Unregister the pwr limit client and * adjust the thermal limits * @limit_ptr: Client handle * * Delete the client handle from the thermal list and adjust the * active clocks if needed. */ void kgsl_pwr_limits_del(void *limit_ptr) { struct kgsl_pwr_limit *limit = limit_ptr; if (IS_ERR_OR_NULL(limit)) return; _update_limits(limit, KGSL_PWR_DEL_LIMIT, 0); kfree(limit); } EXPORT_SYMBOL(kgsl_pwr_limits_del); /** * kgsl_pwr_limits_set_freq() - Set the requested limit for the client * @limit_ptr: Client handle * @freq: Client requested frequency * * Set the new limit for the client and adjust the clocks */ int kgsl_pwr_limits_set_freq(void *limit_ptr, unsigned int freq) { struct kgsl_pwrctrl *pwr; struct kgsl_pwr_limit *limit = limit_ptr; int level; if (IS_ERR_OR_NULL(limit)) return -EINVAL; pwr = &limit->device->pwrctrl; level = _get_nearest_pwrlevel(pwr, freq); if (level < 0) return -EINVAL; _update_limits(limit, KGSL_PWR_SET_LIMIT, level); return 0; } EXPORT_SYMBOL(kgsl_pwr_limits_set_freq); /** * kgsl_pwr_limits_set_gpu_fmax() - Set the requested limit for the * client, if requested freq value is larger than fmax supported * function returns with success. * @limit_ptr: Client handle * @freq: Client requested frequency * * Set the new limit for the client and adjust the clocks */ int kgsl_pwr_limits_set_gpu_fmax(void *limit_ptr, unsigned int freq) { struct kgsl_pwrctrl *pwr; struct kgsl_pwr_limit *limit = limit_ptr; int level; if (IS_ERR_OR_NULL(limit)) return -EINVAL; pwr = &limit->device->pwrctrl; /* * When requested frequency is greater than fmax, * requested limit is implicit, return success here. 
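 * Otherwise the frequency is mapped to the nearest power level and
 * applied like any other limit.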
*/ if (freq >= pwr->pwrlevels[0].gpu_freq) return 0; level = _get_nearest_pwrlevel(pwr, freq); if (level < 0) return -EINVAL; _update_limits(limit, KGSL_PWR_SET_LIMIT, level); return 0; } EXPORT_SYMBOL(kgsl_pwr_limits_set_gpu_fmax); /** * kgsl_pwr_limits_set_default() - Set the default thermal limit for the client * @limit_ptr: Client handle * * Set the default for the client and adjust the clocks */ void kgsl_pwr_limits_set_default(void *limit_ptr) { struct kgsl_pwr_limit *limit = limit_ptr; if (IS_ERR_OR_NULL(limit)) return; _update_limits(limit, KGSL_PWR_SET_LIMIT, 0); } EXPORT_SYMBOL(kgsl_pwr_limits_set_default); /** * kgsl_pwr_limits_get_freq() - Get the current limit * @id: Device ID * * Get the current limit set for the device */ unsigned int kgsl_pwr_limits_get_freq(u32 id) { struct kgsl_device *device = kgsl_get_device(id); struct kgsl_pwrctrl *pwr; unsigned int freq; if (IS_ERR_OR_NULL(device)) return 0; pwr = &device->pwrctrl; mutex_lock(&device->mutex); freq = pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq; mutex_unlock(&device->mutex); return freq; } EXPORT_SYMBOL(kgsl_pwr_limits_get_freq); /** * kgsl_pwrctrl_set_default_gpu_pwrlevel() - Set GPU to default power level * @device: Pointer to the kgsl_device struct */ int kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; unsigned int new_level = pwr->default_pwrlevel; unsigned int old_level = pwr->active_pwrlevel; /* * Update the level according to any thermal, * max/min, or power constraints. */ new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level); /* * If thermal cycling is required and the new level hits the * thermal limit, kick off the cycling. */ kgsl_pwrctrl_set_thermal_cycle(device, new_level); pwr->active_pwrlevel = new_level; pwr->previous_pwrlevel = old_level; /* Request adjusted DCVS level */ return kgsl_clk_set_rate(device, pwr->active_pwrlevel); }
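
/*
 * Example (illustrative only, not part of the driver): a kernel client
 * such as a thermal or cooling driver could use the exported limits API
 * above roughly as follows. The 300 MHz value and the error handling
 * are hypothetical; kgsl_pwr_limits_add(), kgsl_pwr_limits_set_freq()
 * and kgsl_pwr_limits_del() are the real exports from this file.
 *
 *	void *handle = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
 *
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		kgsl_pwr_limits_set_freq(handle, 300000000);
 *		...
 *		kgsl_pwr_limits_del(handle);
 *	}
 */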