author     Keith Lynch <spleef79@gmail.com>    2015-02-18 11:07:58 -0600
committer  Keith Lynch <spleef79@gmail.com>    2015-02-18 11:07:58 -0600
commit     8c96207c1c26815ddd2a4588062b527ef91d0f73 (patch)
tree       55a796a6ccd15005993c0a708c850dd26e7e2f84
parent     1f6fcb0fb79dcf2313cb3cda16dc49fd97370915 (diff)
parent     8d260d86bee9f2a278f62401fce433e44bbe2c7f (diff)
Merge pull request #5 from CyanogenMod/cm-12.0lp5.0
merging recent cm12 changes
-rw-r--r--  Documentation/devicetree/bindings/gpu/adreno.txt    12
-rw-r--r--  arch/arm/boot/dts/qcom/apq8084-gpu.dtsi              4
-rw-r--r--  drivers/gpu/msm/adreno.c                           189
-rw-r--r--  drivers/gpu/msm/adreno.h                            31
-rw-r--r--  drivers/gpu/msm/adreno_a4xx_snapshot.c              16
-rw-r--r--  drivers/gpu/msm/adreno_compat.c                      4
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c                   80
-rw-r--r--  drivers/gpu/msm/adreno_profile.c                    31
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c                 32
-rw-r--r--  drivers/gpu/msm/kgsl.c                             140
-rw-r--r--  drivers/gpu/msm/kgsl.h                               2
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.h                      13
-rw-r--r--  drivers/gpu/msm/kgsl_compat.c                       50
-rw-r--r--  drivers/gpu/msm/kgsl_device.h                        9
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c                       258
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.h                        29
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.c                          79
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.h                          31
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c                      21
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.h                       7
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c                    20
-rw-r--r--  drivers/gpu/msm/kgsl_sync.c                         44
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp.h                    1
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_ctl.c               24
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_intf_cmd.c         320
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_overlay.c          383
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_pipe.c              24
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_rotator.c            9
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_splash_logo.c        1
-rw-r--r--  drivers/video/msm/mdss/mdss_mdp_wb.c                34
-rw-r--r--  include/linux/msm_kgsl.h                             6
31 files changed, 1161 insertions, 743 deletions
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index ec5cfa5991b..edd5bee2837 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -71,12 +71,16 @@ Optional Properties:
This is used to override faulty hardware readings.
- qcom,strtstp-sleepwake: Boolean. Enables use of GPU SLUMBER instead of SLEEP for power savings
-- qcom,pm-qos-latency: Every time GPU wakes up from sleep, driver votes for
+- qcom,pm-qos-active-latency:
+ Right after GPU wakes up from sleep, driver votes for
acceptable maximum latency to the pm-qos driver. This
- voting demands that *CPU* can not go into a power save
- state *if* the latency to bring CPU back into normal
- state is more than this value.
+ voting demands that the system can not go into any
+ power save state *if* the latency to bring system back
+ into active state is more than this value.
Value is in microseconds.
+- qcom,pm-qos-wakeup-latency:
+ Similar to the above. Driver votes against deep low
+ power modes right before GPU wakes up from sleep.
The following properties are optional as collecting data via coresight might
not be supported for every chipset. The documentation for coresight
diff --git a/arch/arm/boot/dts/qcom/apq8084-gpu.dtsi b/arch/arm/boot/dts/qcom/apq8084-gpu.dtsi
index b643551fc5b..8455f9fb3e5 100644
--- a/arch/arm/boot/dts/qcom/apq8084-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8084-gpu.dtsi
@@ -29,6 +29,10 @@
qcom,strtstp-sleepwake;
qcom,clk-map = <0x00000006>; //KGSL_CLK_CORE | KGSL_CLK_IFACE
+ qcom,pm-qos-active-latency = <501>;
+ qcom,pm-qos-wakeup-latency = <101>;
+
+
/* Bus Scale Settings */
qcom,bus-control;
qcom,msm-bus,name = "grp3d";
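
The two device-tree properties above feed the driver's PM QoS votes: a tight wakeup-latency vote is held while the GPU powers up, then relaxed to the active-latency value once start completes (see the _adreno_start() hunk below). The following is a minimal sketch of that voting pattern, assuming a pm_qos_request that was already registered with pm_qos_add_request(); the helper and its name are illustrative, not part of this patch.

    #include <linux/pm_qos.h>

    /*
     * Sketch: vote a tight CPU wakeup latency while the GPU powers up,
     * then relax to the steady-state "active" latency. Values mirror
     * the defaults in this diff (101 us wakeup, 501 us active).
     */
    static int gpu_start_with_qos(struct pm_qos_request *req,
                                  s32 wakeup_us, s32 active_us,
                                  int (*hw_start)(void))
    {
            int ret;

            /* forbid deep low-power modes while the GPU is powering up */
            pm_qos_update_request(req, wakeup_us);

            ret = hw_start();

            /* steady state: relax the vote so shallower idle states resume */
            if (active_us != wakeup_us)
                    pm_qos_update_request(req, active_us);

            return ret;
    }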
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index d2b9262e19d..ac7df87ad1f 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -176,16 +176,18 @@ static void adreno_input_event(struct input_handle *handle, unsigned int type,
if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH)
return;
+
/*
* If the device is in nap, kick the idle timer to make sure that we
* don't go into slumber before the first render. If the device is
* already in slumber schedule the wake.
*/
- if (device->state == KGSL_STATE_NAP) {
- /*
- * Set the wake on touch bit to keep from coming back here and
- * keeping the device in nap without rendering
- */
+
+ if (device->state == KGSL_STATE_NAP) {
+ /*
+ * Set the wake on touch bit to keep from coming back here and
+ * keeping the device in nap without rendering
+ */
device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
@@ -359,6 +361,7 @@ done:
int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
{
+ struct kgsl_device *device = &adreno_dev->dev;
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
struct adreno_perfcount_group *group;
struct kgsl_perfcounter_read_group *list = NULL;
@@ -387,6 +390,13 @@ int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
goto done;
}
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ ret = kgsl_active_count_get(device);
+ if (ret) {
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ goto done;
+ }
+
/* list iterator */
for (j = 0; j < count; j++) {
@@ -395,7 +405,7 @@ int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
/* Verify that the group ID is within range */
if (list[j].groupid >= counters->group_count) {
ret = -EINVAL;
- goto done;
+ break;
}
group = &(counters->groups[list[j].groupid]);
@@ -411,11 +421,13 @@ int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
}
}
+ kgsl_active_count_put(device);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
/* write the data */
- if (copy_to_user(reads, list,
- sizeof(struct kgsl_perfcounter_read_group) *
- count) != 0)
- ret = -EFAULT;
+ if (ret == 0)
+ ret = copy_to_user(reads, list,
+ sizeof(struct kgsl_perfcounter_read_group) * count);
done:
kfree(list);
@@ -490,9 +502,12 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
unsigned int groupid, unsigned int __user *countables,
unsigned int count, unsigned int *max_counters)
{
+ struct kgsl_device *device = &adreno_dev->dev;
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
struct adreno_perfcount_group *group;
- unsigned int i;
+ unsigned int i, t;
+ int ret;
+ unsigned int *buf;
*max_counters = 0;
@@ -502,6 +517,8 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
if (groupid >= counters->group_count)
return -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
group = &(counters->groups[groupid]);
*max_counters = group->reg_count;
@@ -509,20 +526,28 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
* if NULL countable or *count of zero, return max reg_count in
* *max_counters and return success
*/
- if (countables == NULL || count == 0)
+ if (countables == NULL || count == 0) {
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return 0;
+ }
- /*
- * Go through all available counters. Write upto *count * countable
- * values.
- */
- for (i = 0; i < group->reg_count && i < count; i++) {
- if (copy_to_user(&countables[i], &(group->regs[i].countable),
- sizeof(unsigned int)) != 0)
- return -EFAULT;
+ t = min_t(int, group->reg_count, count);
+
+ buf = kmalloc(t * sizeof(unsigned int), GFP_KERNEL);
+ if (buf == NULL) {
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ return -ENOMEM;
}
- return 0;
+ for (i = 0; i < t; i++)
+ buf[i] = group->regs[i].countable;
+
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
+ ret = copy_to_user(countables, buf, sizeof(unsigned int) * t);
+ kfree(buf);
+
+ return ret;
}
static inline void refcount_group(struct adreno_perfcount_group *group,
@@ -1135,10 +1160,7 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
cmds = link;
- result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
-
- if (result)
- goto done;
+ kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
@@ -1178,11 +1200,10 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
* after the command has been retired
*/
if (result)
- kgsl_mmu_disable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER);
+ kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
else
kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
- KGSL_IOMMU_CONTEXT_USER);
+ KGSL_IOMMU_MAX_UNITS);
done:
kfree(link);
@@ -1468,10 +1489,17 @@ static int adreno_of_get_pdata(struct platform_device *pdev)
if (ret)
goto err;
- /* get pm-qos-latency from target, set it to default if not found */
- if (of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency",
- &pdata->pm_qos_latency))
- pdata->pm_qos_latency = 501;
+ /* get pm-qos-active-latency, set it to default if not found */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,pm-qos-active-latency",
+ &pdata->pm_qos_active_latency))
+ pdata->pm_qos_active_latency = 501;
+
+ /* get pm-qos-wakeup-latency, set it to default if not found */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,pm-qos-wakeup-latency",
+ &pdata->pm_qos_wakeup_latency))
+ pdata->pm_qos_wakeup_latency = 101;
if (of_property_read_u32(pdev->dev.of_node, "qcom,idle-timeout",
&pdata->idle_timeout))
@@ -1776,6 +1804,11 @@ static int _adreno_start(struct adreno_device *adreno_dev)
int status = -EINVAL;
unsigned int state = device->state;
unsigned int regulator_left_on = 0;
+ unsigned int pmqos_wakeup_vote = device->pwrctrl.pm_qos_wakeup_latency;
+ unsigned int pmqos_active_vote = device->pwrctrl.pm_qos_active_latency;
+
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ pmqos_wakeup_vote);
kgsl_cffdump_open(device);
@@ -1839,6 +1872,10 @@ static int _adreno_start(struct adreno_device *adreno_dev)
set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
+ if (pmqos_active_vote != pmqos_wakeup_vote)
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ pmqos_active_vote);
+
return 0;
error_rb_stop:
@@ -1854,6 +1891,10 @@ error_clk_off:
/* set the state back to original state */
kgsl_pwrctrl_set_state(device, state);
+ if (pmqos_active_vote != pmqos_wakeup_vote)
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ pmqos_active_vote);
+
return status;
}
@@ -1912,7 +1953,7 @@ static int adreno_start(struct kgsl_device *device, int priority)
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/* default 501 will allow PC to happen, set it to 490 to prevent PC happening during adreno_start; */
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, 490);
+ //pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, 490);
/* No priority (normal latency) call the core start function directly */
if (!priority)
@@ -1927,7 +1968,7 @@ static int adreno_start(struct kgsl_device *device, int priority)
flush_work(&adreno_dev->start_work);
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, device->pwrctrl.pm_qos_latency);
+ //pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, device->pwrctrl.pm_qos_latency);
return _status;
}
@@ -2562,6 +2603,11 @@ static int adreno_set_constraint(struct kgsl_device *device,
break;
}
+ /* If a new constraint has been set for a context, cancel the old one */
+ if ((status == 0) &&
+ (context->id == device->pwrctrl.constraint.owner_id))
+ device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
+
return status;
}
@@ -2586,6 +2632,8 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
break;
}
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
if (enable) {
device->pwrctrl.ctrl_flags = 0;
adreno_dev->fast_hang_detect = 1;
@@ -2605,6 +2653,7 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
kgsl_pwrscale_disable(device);
}
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
status = 0;
}
break;
@@ -2623,10 +2672,13 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
context = kgsl_context_get_owner(dev_priv,
constraint.context_id);
+
if (context == NULL)
break;
+
status = adreno_set_constraint(device, context,
&constraint);
+
kgsl_context_put(context);
}
break;
@@ -2771,24 +2823,16 @@ bool adreno_isidle(struct kgsl_device *device)
}
/**
- * adreno_idle() - wait for the GPU hardware to go idle
- * @device: Pointer to the KGSL device structure for the GPU
+ * adreno_spin_idle() - Spin wait for the GPU to idle
+ * @device: Pointer to the KGSL device
*
- * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
+ * Spin the CPU waiting for the RBBM status to return idle
*/
-
-int adreno_idle(struct kgsl_device *device)
+int adreno_spin_idle(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- /*
- * Make sure the device mutex is held so the dispatcher can't send any
- * more commands to the hardware
- */
-
- BUG_ON(!mutex_is_locked(&device->mutex));
-
kgsl_cffdump_regpoll(device,
adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
0x00000000, 0x80000000);
@@ -2812,6 +2856,39 @@ int adreno_idle(struct kgsl_device *device)
}
/**
+ * adreno_idle() - wait for the GPU hardware to go idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
+ */
+
+int adreno_idle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret;
+
+ /*
+ * Make sure the device mutex is held so the dispatcher can't send any
+ * more commands to the hardware
+ */
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ /* Check if we are already idle before idling dispatcher */
+ if (adreno_isidle(device))
+ return 0;
+ /*
+ * Wait for dispatcher to finish completing commands
+ * already submitted
+ */
+ ret = adreno_dispatcher_idle(adreno_dev);
+ if (ret)
+ return ret;
+
+ return adreno_spin_idle(device);
+}
+
+/**
* adreno_drain() - Drain the dispatch queue
* @device: Pointer to the KGSL device structure for the GPU
*
@@ -3030,24 +3107,28 @@ static long adreno_ioctl(struct kgsl_device_private *dev_priv,
switch (cmd) {
case IOCTL_KGSL_PERFCOUNTER_GET: {
struct kgsl_perfcounter_get *get = data;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
/*
* adreno_perfcounter_get() is called by kernel clients
* during start(), so it is not safe to take an
* active count inside this function.
*/
result = kgsl_active_count_get(device);
- if (result)
- break;
- result = adreno_perfcounter_get(adreno_dev, get->groupid,
- get->countable, &get->offset, &get->offset_hi,
- PERFCOUNTER_FLAG_NONE);
- kgsl_active_count_put(device);
+ if (result == 0) {
+ result = adreno_perfcounter_get(adreno_dev,
+ get->groupid, get->countable, &get->offset,
+ &get->offset_hi, PERFCOUNTER_FLAG_NONE);
+ kgsl_active_count_put(device);
+ }
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
break;
}
case IOCTL_KGSL_PERFCOUNTER_PUT: {
struct kgsl_perfcounter_put *put = data;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
result = adreno_perfcounter_put(adreno_dev, put->groupid,
put->countable, PERFCOUNTER_FLAG_NONE);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
break;
}
case IOCTL_KGSL_PERFCOUNTER_QUERY: {
@@ -3059,12 +3140,8 @@ static long adreno_ioctl(struct kgsl_device_private *dev_priv,
}
case IOCTL_KGSL_PERFCOUNTER_READ: {
struct kgsl_perfcounter_read *read = data;
- result = kgsl_active_count_get(device);
- if (result)
- break;
result = adreno_perfcounter_read_group(adreno_dev,
read->reads, read->count);
- kgsl_active_count_put(device);
break;
}
default:
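
The recurring change in this file is that copy_to_user() is no longer called while the device mutex is held: adreno_perfcounter_query_group() now snapshots the countables into a kernel bounce buffer under the lock, drops the lock, then copies out. A minimal sketch of that pattern, assuming generic names (lock, vals, nvals are stand-ins, not driver symbols):

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static long copy_snapshot_to_user(struct mutex *lock,
                                      const unsigned int *vals, size_t nvals,
                                      unsigned int __user *out)
    {
            unsigned int *buf;
            long ret;

            buf = kmalloc(nvals * sizeof(*buf), GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            mutex_lock(lock);
            memcpy(buf, vals, nvals * sizeof(*buf));  /* snapshot under lock */
            mutex_unlock(lock);

            /* the user copy can fault and sleep, so it runs unlocked */
            ret = copy_to_user(out, buf, nvals * sizeof(*buf)) ? -EFAULT : 0;
            kfree(buf);
            return ret;
    }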
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 7c47f11dad5..ab8087e1725 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -117,6 +117,7 @@ enum adreno_gpurev {
* submitted operation
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
+ * @idle_gate: Gate to wait on for dispatcher to idle
*/
struct adreno_dispatcher {
struct mutex mutex;
@@ -132,6 +133,7 @@ struct adreno_dispatcher {
unsigned int tail;
struct work_struct work;
struct kobject kobj;
+ struct completion idle_gate;
};
enum adreno_dispatcher_flags {
@@ -189,6 +191,7 @@ struct adreno_device {
struct adreno_busy_data busy_data;
unsigned int ram_cycles_lo;
unsigned int starved_ram_lo;
+ atomic_t halt;
};
/**
@@ -595,6 +598,7 @@ extern const unsigned int a4xx_sp_tp_registers_count;
extern unsigned int ft_detect_regs[];
+int adreno_spin_idle(struct kgsl_device *device);
int adreno_idle(struct kgsl_device *device);
bool adreno_isidle(struct kgsl_device *device);
@@ -618,6 +622,7 @@ void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
void adreno_dispatcher_start(struct kgsl_device *device);
int adreno_dispatcher_init(struct adreno_device *adreno_dev);
void adreno_dispatcher_close(struct adreno_device *adreno_dev);
+int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
void adreno_dispatcher_irq_fault(struct kgsl_device *device);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
@@ -936,7 +941,7 @@ static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
/**
* adreno_gpu_fault() - Return the current state of the GPU
- * @adreno_dev: A ponter to the adreno_device to query
+ * @adreno_dev: A pointer to the adreno_device to query
*
* Return 0 if there is no fault or positive with the last type of fault that
* occurred
@@ -948,6 +953,18 @@ static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
}
/**
+ * adreno_gpu_halt() - Return the halt status of GPU
+ * @adreno_dev: A pointer to the adreno_device to query
+ *
+ * Return the halt request value
+ */
+static inline unsigned int adreno_gpu_halt(struct adreno_device *adreno_dev)
+{
+ smp_rmb();
+ return atomic_read(&adreno_dev->halt);
+}
+
+/**
* adreno_set_gpu_fault() - Set the current fault status of the GPU
* @adreno_dev: A pointer to the adreno_device to set
* @state: fault state to set
@@ -962,6 +979,18 @@ static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
}
/**
+ * adreno_set_gpu_halt() - Set the halt request
+ * @adreno_dev: A pointer to the adreno_device to set
+ * @state: Value to set
+ */
+static inline void adreno_set_gpu_halt(struct adreno_device *adreno_dev,
+ int state)
+{
+ atomic_set(&adreno_dev->halt, state);
+ smp_wmb();
+}
+
+/**
* adreno_clear_gpu_fault() - Clear the GPU fault register
* @adreno_dev: A pointer to an adreno_device structure
*
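
The new halt field is an atomic_t written by adreno_dispatcher_idle() and read in the dispatcher's submit path; the explicit smp_wmb()/smp_rmb() pair orders the flag against the ringbuffer state it guards. A minimal sketch of that producer/consumer handshake, with illustrative function names (only the halt flag itself is from the patch):

    #include <linux/atomic.h>

    static atomic_t halt = ATOMIC_INIT(0);

    static void request_halt(void)
    {
            atomic_set(&halt, 1);
            smp_wmb();              /* publish the flag before waiting */
    }

    static int try_submit(void)
    {
            smp_rmb();              /* observe the latest halt request */
            if (atomic_read(&halt))
                    return -EINVAL; /* refuse new work, as sendcmd() does */
            /* ... queue commands to the ringbuffer ... */
            return 0;
    }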
diff --git a/drivers/gpu/msm/adreno_a4xx_snapshot.c b/drivers/gpu/msm/adreno_a4xx_snapshot.c
index 8adedd91584..f5049bf44ad 100644
--- a/drivers/gpu/msm/adreno_a4xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a4xx_snapshot.c
@@ -364,23 +364,15 @@ void *a4xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
a4xx_sp_tp_registers_count, 0);
/* Turn on MMU clocks since we read MMU registers */
- if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER)) {
- KGSL_CORE_ERR("Failed to turn on iommu user context clocks\n");
- goto skip_regs;
- }
- if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV)) {
- kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
- KGSL_CORE_ERR("Failed to turn on iommu priv context clocks\n");
- goto skip_regs;
- }
+ kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
+
/* Master set of (non debug) registers */
snapshot = kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
kgsl_snapshot_dump_regs, &list);
- kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
- kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_PRIV);
-skip_regs:
+ kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
+
snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
remain,
A4XX_CP_STATE_DEBUG_INDEX, A4XX_CP_STATE_DEBUG_DATA,
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index 431ab7bf967..a4b8d056608 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -123,12 +123,8 @@ long adreno_compat_ioctl(struct kgsl_device_private *dev_priv,
read.reads = (struct kgsl_perfcounter_read_group __user *)
(uintptr_t)read32->reads;
read.count = read32->count;
- result = kgsl_active_count_get(device);
- if (result)
- break;
result = adreno_perfcounter_read_group(adreno_dev,
read.reads, read.count);
- kgsl_active_count_put(device);
break;
}
default:
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 3f8ce8bcf4a..a32f4e3b9ad 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include "kgsl.h"
+#include "kgsl_cffdump.h"
#include "adreno.h"
#include "adreno_ringbuffer.h"
#include "adreno_trace.h"
@@ -295,6 +296,9 @@ static int sendcmd(struct adreno_device *adreno_dev,
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int ret;
+ if (0 != adreno_gpu_halt(adreno_dev))
+ return -EINVAL;
+
dispatcher->inflight++;
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
@@ -320,9 +324,10 @@ static int sendcmd(struct adreno_device *adreno_dev,
*/
if (dispatcher->inflight == 1) {
- if (ret == 0)
+ if (ret == 0) {
fault_detect_read(device);
- else {
+ init_completion(&dispatcher->idle_gate);
+ } else {
kgsl_active_count_put(device);
clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
}
@@ -480,6 +485,9 @@ static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
if (adreno_gpu_fault(adreno_dev) != 0)
break;
+ if (0 != adreno_gpu_halt(adreno_dev))
+ break;
+
spin_lock(&dispatcher->plist_lock);
if (plist_head_empty(&dispatcher->pending)) {
@@ -1029,6 +1037,7 @@ static int dispatcher_do_fault(struct kgsl_device *device)
int fault, first = 0;
bool pagefault = false;
char *state = "failed";
+ int halt;
fault = atomic_xchg(&dispatcher->fault, 0);
if (fault == 0)
@@ -1055,6 +1064,9 @@ static int dispatcher_do_fault(struct kgsl_device *device)
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ /* hang opcode */
+ kgsl_cffdump_hang(device);
+
cmdbatch = dispatcher->cmdqueue[dispatcher->head];
trace_adreno_cmdbatch_fault(cmdbatch, fault);
@@ -1069,6 +1081,8 @@ static int dispatcher_do_fault(struct kgsl_device *device)
reg |= (1 << 27) | (1 << 28);
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
}
+ /* Set pagefault if it occurred */
+ kgsl_mmu_set_pagefault(&device->mmu);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
@@ -1288,7 +1302,11 @@ replay:
/* Reset the GPU */
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
/* make sure halt is not set during recovery */
+
+ halt = adreno_gpu_halt(adreno_dev);
+ adreno_set_gpu_halt(adreno_dev, 0);
ret = adreno_reset(device);
kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
/* if any other fault got in until reset then ignore */
@@ -1347,6 +1365,8 @@ replay:
}
kfree(replay);
+ /* restore halt indicator */
+ adreno_set_gpu_halt(adreno_dev, halt);
return 1;
}
@@ -1547,7 +1567,10 @@ done:
kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
} else {
/* There is nothing left in the pipeline. Shut 'er down boys */
+
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ complete_all(&dispatcher->idle_gate);
+
/*
* Stop the fault timer before decrementing the active count to
* avoid reading the hardware registers while we are trying to
@@ -1853,6 +1876,9 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+ init_completion(&dispatcher->idle_gate);
+ complete_all(&dispatcher->idle_gate);
+
plist_head_init(&dispatcher->pending);
spin_lock_init(&dispatcher->plist_lock);
@@ -1861,3 +1887,53 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
return ret;
}
+
+/*
+ * adreno_dispatcher_idle() - Wait for dispatcher to idle
+ * @adreno_dev: Adreno device whose dispatcher needs to idle
+ *
+ * Signal dispatcher to stop sending more commands and complete
+ * the commands that have already been submitted. This function
+ * should not be called when dispatcher mutex is held.
+ */
+int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
+ return 0;
+
+ /*
+ * Ensure that this function is not called when dispatcher
+ * mutex is held and device is started
+ */
+ if (mutex_is_locked(&dispatcher->mutex) &&
+ dispatcher->mutex.owner == current)
+ BUG_ON(1);
+
+ adreno_set_gpu_halt(adreno_dev, 1);
+
+ mutex_unlock(&device->mutex);
+
+ ret = wait_for_completion_timeout(&dispatcher->idle_gate,
+ msecs_to_jiffies(ADRENO_IDLE_TIMEOUT));
+ if (ret <= 0) {
+ if (!ret)
+ ret = -ETIMEDOUT;
+ KGSL_DRV_ERR(device, "Dispatcher halt failed %d\n", ret);
+ } else {
+ ret = 0;
+ }
+
+ mutex_lock(&device->mutex);
+ adreno_set_gpu_halt(adreno_dev, 0);
+ /*
+ * requeue dispatcher work to resubmit pending commands
+ * that may have been blocked due to this idling request
+ */
+ adreno_dispatcher_schedule(device);
+ return ret;
+}
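
adreno_dispatcher_idle() combines the halt flag with a completion: set halt, drop the device mutex, wait with a timeout for the inflight queue to drain and fire idle_gate, then clear halt and reschedule. A sketch of just the gate, assuming generic names; init_completion() is re-armed by sendcmd() when the queue goes non-empty and complete_all() fires when the last command retires:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static struct completion idle_gate;

    static int wait_for_drain(unsigned int timeout_ms)
    {
            unsigned long left;

            left = wait_for_completion_timeout(&idle_gate,
                                               msecs_to_jiffies(timeout_ms));
            return left ? 0 : -ETIMEDOUT;   /* 0 jiffies left == timed out */
    }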
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 955a278de2a..cf8023ee501 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -660,6 +660,15 @@ static ssize_t profile_assignments_write(struct file *filep,
if (len >= PAGE_SIZE || len == 0)
return -EINVAL;
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, len)) {
+ size = -EFAULT;
+ goto error_free;
+ }
+
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
if (adreno_profile_enabled(profile)) {
@@ -668,8 +677,10 @@ static ssize_t profile_assignments_write(struct file *filep,
}
ret = kgsl_active_count_get(device);
- if (ret)
- return -EINVAL;
+ if (ret) {
+ size = ret;
+ goto error_unlock;
+ }
/*
* When adding/removing assignments, ensure that the GPU is done with
@@ -677,19 +688,13 @@ static ssize_t profile_assignments_write(struct file *filep,
* GPU and avoid racey conditions.
*/
if (adreno_idle(device)) {
- size = -EINVAL;
+ size = -ETIMEDOUT;
goto error_put;
}
/* clear all shared buffer results */
adreno_profile_process_results(device);
- buf = kmalloc(len + 1, GFP_KERNEL);
- if (!buf) {
- size = -EINVAL;
- goto error_put;
- }
-
pbuf = buf;
/* clear the log buffer */
@@ -698,10 +703,6 @@ static ssize_t profile_assignments_write(struct file *filep,
profile->log_tail = profile->log_buffer;
}
- if (copy_from_user(buf, user_buf, len)) {
- size = -EFAULT;
- goto error_free;
- }
/* for sanity and parsing, ensure it is null terminated */
buf[len] = '\0';
@@ -721,12 +722,12 @@ static ssize_t profile_assignments_write(struct file *filep,
size = len;
-error_free:
- kfree(buf);
error_put:
kgsl_active_count_put(device);
error_unlock:
kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+error_free:
+ kfree(buf);
return size;
}
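
profile_assignments_write() now performs the kmalloc() and copy_from_user() before taking the device mutex, since a faulting user copy can sleep while the lock is held. A sketch of that reordering under generic names (dev_lock and the parse step are placeholders):

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static ssize_t write_op(struct mutex *dev_lock,
                            const char __user *ubuf, size_t len)
    {
            char *buf = kmalloc(len + 1, GFP_KERNEL);
            ssize_t ret = len;

            if (!buf)
                    return -ENOMEM;
            /* capture user memory before any lock is taken */
            if (copy_from_user(buf, ubuf, len)) {
                    kfree(buf);
                    return -EFAULT;
            }
            buf[len] = '\0';

            mutex_lock(dev_lock);
            /* ... parse and apply the assignments ... */
            mutex_unlock(dev_lock);

            kfree(buf);
            return ret;
    }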
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 435744f2698..fdf8d28188c 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -404,7 +404,7 @@ static int _ringbuffer_bootstrap_ucode(struct adreno_ringbuffer *rb,
}
/* idle device to validate bootstrap */
- return adreno_idle(device);
+ return adreno_spin_idle(device);
}
/**
@@ -476,7 +476,7 @@ static int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
return status;
/* idle device to validate ME INIT */
- status = adreno_idle(device);
+ status = adreno_spin_idle(device);
if (status == 0)
rb->flags |= KGSL_FLAGS_STARTED;
@@ -1187,8 +1187,9 @@ void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
constraint = adreno_ringbuffer_get_constraint(device, context);
/*
- * If a constraint is already set, set a new
- * constraint only if it is faster
+ * If a constraint is already set, set a new constraint only
+ * if it is faster. If the requested constraint is the same
+ * as the current one, update ownership and timestamp.
*/
if ((device->pwrctrl.constraint.type ==
KGSL_CONSTRAINT_NONE) || (constraint <
@@ -1199,14 +1200,21 @@ void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
context->pwr_constraint.type;
device->pwrctrl.constraint.hint.
pwrlevel.level = constraint;
+ device->pwrctrl.constraint.owner_id = context->id;
+ device->pwrctrl.constraint.expires = jiffies +
+ device->pwrctrl.interval_timeout;
/* Trace the constraint being set by the driver */
trace_kgsl_constraint(device,
device->pwrctrl.constraint.type,
constraint, 1);
+ } else if ((device->pwrctrl.constraint.type ==
+ context->pwr_constraint.type) &&
+ (device->pwrctrl.constraint.hint.pwrlevel.level ==
+ constraint)) {
+ device->pwrctrl.constraint.owner_id = context->id;
+ device->pwrctrl.constraint.expires = jiffies +
+ device->pwrctrl.interval_timeout;
}
-
- device->pwrctrl.constraint.expires = jiffies +
- device->pwrctrl.interval_timeout;
}
}
@@ -1347,16 +1355,18 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* Set the constraints before adding to ringbuffer */
adreno_ringbuffer_set_constraint(device, cmdbatch);
+ /* CFF stuff executed only if CFF is enabled */
+ kgsl_cffdump_capture_ib_desc(device, context, ibdesc, numibs);
+
ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
drawctxt,
flags,
&link[0], (cmds - link),
cmdbatch->timestamp);
- /* CFF stuff executed only if CFF is enabled */
- kgsl_cffdump_capture_ib_desc(device, context, ibdesc, numibs);
- kgsl_cff_core_idle(device);
-
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x00000000, 0x80000000);
done:
device->pwrctrl.irq_last = 0;
trace_kgsl_issueibcmds(device, context->id, cmdbatch,
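
The constraint logic above now records owner_id and refreshes expires not only when a faster vote wins but also when the same context re-requests the constraint it already holds; together with the adreno_set_constraint() hunk earlier, that lets a context cancel its own vote. A sketch of the decision rule with stand-in types (lower pwrlevel means faster):

    #include <linux/jiffies.h>

    struct constraint_s {
            int type;               /* KGSL_CONSTRAINT_* stand-in */
            unsigned int level;     /* lower pwrlevel == faster   */
            unsigned int owner_id;
            unsigned long expires;
    };

    static void refresh_constraint(struct constraint_s *cur,
                                   const struct constraint_s *req,
                                   unsigned long timeout)
    {
            if (cur->type == 0 /* NONE */ || req->level < cur->level) {
                    *cur = *req;                    /* faster vote replaces */
                    cur->expires = jiffies + timeout;
            } else if (cur->type == req->type && cur->level == req->level) {
                    cur->owner_id = req->owner_id;  /* same vote: refresh */
                    cur->expires = jiffies + timeout;
            }
    }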
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 353c39bcb01..faf8ab754e1 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -57,13 +57,6 @@
#define pgprot_writethroughcache(_prot) (_prot)
#endif
-/*
- * To accommodate legacy GPU address mmapping we need to make sure that the GPU
- * object won't conflict with the address space so define the IDs to start
- * at the top of the user address space region
- */
-#define KGSL_GPUOBJ_ID_MIN (TASK_SIZE >> PAGE_SHIFT)
-
static char *ksgl_mmu_type;
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
@@ -371,8 +364,7 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
return -EBADF;
idr_preload(GFP_KERNEL);
spin_lock(&process->mem_lock);
- id = idr_alloc(&process->mem_idr, entry, KGSL_GPUOBJ_ID_MIN, 0,
- GFP_NOWAIT);
+ id = idr_alloc(&process->mem_idr, entry, 1, 0, GFP_NOWAIT);
spin_unlock(&process->mem_lock);
idr_preload_end();
@@ -1426,6 +1418,8 @@ long kgsl_ioctl_device_waittimestamp_ctxtid(
unsigned int temp_cur_ts = 0;
struct kgsl_context *context;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
goto out;
@@ -1445,6 +1439,8 @@ long kgsl_ioctl_device_waittimestamp_ctxtid(
out:
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
return result;
}
@@ -2240,20 +2236,23 @@ long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
void *data)
{
struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
long result = -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context) {
- result = kgsl_readtimestamp(dev_priv->device, context,
+ result = kgsl_readtimestamp(device, context,
param->type, &param->timestamp);
- trace_kgsl_readtimestamp(dev_priv->device, context->id,
+ trace_kgsl_readtimestamp(device, context->id,
param->type, param->timestamp);
}
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2286,9 +2285,11 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
if (param->type != KGSL_TIMESTAMP_RETIRED)
return -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
- return -EINVAL;
+ goto out;
entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
@@ -2320,6 +2321,7 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
out:
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2331,6 +2333,7 @@ long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
struct kgsl_context *context = NULL;
struct kgsl_device *device = dev_priv->device;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = device->ftbl->drawctxt_create(dev_priv, &param->flags);
if (IS_ERR(context)) {
result = PTR_ERR(context);
@@ -2339,6 +2342,7 @@ long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
trace_kgsl_context_create(dev_priv->device, context, param->flags);
param->drawctxt_id = context->id;
done:
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2346,14 +2350,17 @@ long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_drawctxt_destroy *param = data;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
long result;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
result = kgsl_context_detach(context);
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2623,6 +2630,22 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
return (p == file) ? (fd + 1) : 0;
}
+static void _setup_cache_mode(struct kgsl_mem_entry *entry,
+ struct vm_area_struct *vma)
+{
+ unsigned int mode;
+ pgprot_t pgprot = vma->vm_page_prot;
+
+ if (pgprot == pgprot_noncached(pgprot))
+ mode = KGSL_CACHEMODE_UNCACHED;
+ else if (pgprot == pgprot_writecombine(pgprot))
+ mode = KGSL_CACHEMODE_WRITECOMBINE;
+ else
+ mode = KGSL_CACHEMODE_WRITEBACK;
+
+ entry->memdesc.flags |= (mode << KGSL_CACHEMODE_SHIFT);
+}
+
static int kgsl_setup_useraddr(struct kgsl_mem_entry *entry,
struct kgsl_pagetable *pagetable,
void *data,
@@ -2668,6 +2691,13 @@ static int kgsl_setup_useraddr(struct kgsl_mem_entry *entry,
int ret = kgsl_setup_dma_buf(entry, pagetable, device, dmabuf);
if (ret)
dma_buf_put(dmabuf);
+ else {
+ /* Match the cache settings of the vma region */
+ _setup_cache_mode(entry, vma);
+ /* Set the useraddr to the incoming hostptr */
+ entry->memdesc.useraddr = param->hostptr;
+ }
+
return ret;
}
@@ -2796,7 +2826,7 @@ static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
int fd = param->fd;
struct dma_buf *dmabuf;
- if (!param->len)
+ if (!param->len || param->offset)
return -EINVAL;
dmabuf = dma_buf_get(fd);
@@ -2933,7 +2963,7 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
goto error_attach;
/* Adjust the returned value for a non 4k aligned offset */
- param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+ param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & PAGE_MASK);
KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
kgsl_driver.stats.mapped_max);
@@ -3408,60 +3438,52 @@ long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
static const struct kgsl_ioctl kgsl_ioctl_funcs[] = {
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
- kgsl_ioctl_device_getproperty,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_getproperty),
/* IOCTL_KGSL_DEVICE_WAITTIMESTAMP is no longer supported */
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
- kgsl_ioctl_device_waittimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_waittimestamp_ctxtid),
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
- kgsl_ioctl_rb_issueibcmds, 0),
+ kgsl_ioctl_rb_issueibcmds),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
- kgsl_ioctl_submit_commands, 0),
+ kgsl_ioctl_submit_commands),
/* IOCTL_KGSL_CMDSTREAM_READTIMESTAMP is no longer supported */
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
- kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_readtimestamp_ctxtid),
/* IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP is no longer supported */
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
- kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_freememontimestamp_ctxtid),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
- kgsl_ioctl_drawctxt_create,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_drawctxt_create),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
- kgsl_ioctl_drawctxt_destroy,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_drawctxt_destroy),
KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
- kgsl_ioctl_map_user_mem, 0),
+ kgsl_ioctl_map_user_mem),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
- kgsl_ioctl_map_user_mem, 0),
+ kgsl_ioctl_map_user_mem),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
- kgsl_ioctl_sharedmem_free, 0),
+ kgsl_ioctl_sharedmem_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
- kgsl_ioctl_sharedmem_flush_cache, 0),
+ kgsl_ioctl_sharedmem_flush_cache),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
- kgsl_ioctl_gpumem_alloc, 0),
+ kgsl_ioctl_gpumem_alloc),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
- kgsl_ioctl_cff_syncmem, 0),
+ kgsl_ioctl_cff_syncmem),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
- kgsl_ioctl_cff_user_event, 0),
+ kgsl_ioctl_cff_user_event),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
- kgsl_ioctl_timestamp_event,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_timestamp_event),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
- kgsl_ioctl_device_setproperty,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_setproperty),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
- kgsl_ioctl_gpumem_alloc_id, 0),
+ kgsl_ioctl_gpumem_alloc_id),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
- kgsl_ioctl_gpumem_free_id, 0),
+ kgsl_ioctl_gpumem_free_id),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
- kgsl_ioctl_gpumem_get_info, 0),
+ kgsl_ioctl_gpumem_get_info),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
- kgsl_ioctl_gpumem_sync_cache, 0),
+ kgsl_ioctl_gpumem_sync_cache),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK,
- kgsl_ioctl_gpumem_sync_cache_bulk, 0),
+ kgsl_ioctl_gpumem_sync_cache_bulk),
};
long kgsl_ioctl_helper(struct file *filep, unsigned int cmd,
@@ -3471,7 +3493,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd,
struct kgsl_device_private *dev_priv = filep->private_data;
unsigned int nr;
kgsl_ioctl_func_t func;
- int lock, ret;
+ int ret;
char ustack[64];
void *uptr = NULL;
@@ -3520,7 +3542,6 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd,
}
func = ioctl_funcs[nr].func;
- lock = ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK;
} else {
if (is_compat_task() &&
cmd != IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET &&
@@ -3535,19 +3556,10 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd,
ret = -ENOIOCTLCMD;
goto done;
}
- lock = 1;
}
- if (lock)
- kgsl_mutex_lock(&dev_priv->device->mutex,
- &dev_priv->device->mutex_owner);
-
ret = func(dev_priv, cmd, uptr);
- if (lock)
- kgsl_mutex_unlock(&dev_priv->device->mutex,
- &dev_priv->device->mutex_owner);
-
/*
* Still copy back on failure, but assume function took
* all necessary precautions sanitizing the return values.
@@ -3653,14 +3665,10 @@ get_mmap_entry(struct kgsl_process_private *private,
int ret = 0;
struct kgsl_mem_entry *entry;
- /*
- * GPU object IDs start at TASK_SIZE >> PAGE_SHIFT. Anything
- * less is legacy GPU memory being mapped by address
- */
- if (pgoff >= KGSL_GPUOBJ_ID_MIN)
- entry = kgsl_sharedmem_find_id(private, pgoff);
- else
+ entry = kgsl_sharedmem_find_id(private, pgoff);
+ if (entry == NULL) {
entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
+ }
if (!entry)
return -EINVAL;
@@ -3672,12 +3680,6 @@ get_mmap_entry(struct kgsl_process_private *private,
goto err_put;
}
- /* External memory cannot be mapped */
- if ((KGSL_MEMFLAGS_USERMEM_MASK & entry->memdesc.flags) != 0) {
- ret = -EINVAL;
- goto err_put;
- }
-
if (entry->memdesc.useraddr != 0) {
ret = -EBUSY;
goto err_put;
@@ -4099,7 +4101,7 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
if (ret)
return ret;
- vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
+ vma->vm_flags |= entry->memdesc.ops->vmflags;
vma->vm_private_data = entry;
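
With the table-driven KGSL_IOCTL_LOCK flag gone, each handler that still needs serialization takes the device mutex itself, which keeps unrelated ioctls (memory allocation, cache sync) off the global lock. A sketch of the per-handler shape using the wrappers and types that appear throughout this diff; the handler body is a placeholder:

    static long example_locked_ioctl(struct kgsl_device_private *dev_priv,
                                     unsigned int cmd, void *data)
    {
            struct kgsl_device *device = dev_priv->device;
            long result;

            kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
            result = 0;     /* ... handler work under the device mutex ... */
            kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

            return result;
    }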
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index a5996ce4b66..dc0c3ac0263 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -116,7 +116,7 @@ struct kgsl_memdesc;
struct kgsl_cmdbatch;
struct kgsl_memdesc_ops {
- int (*vmflags)(struct kgsl_memdesc *);
+ unsigned int vmflags;
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
struct vm_fault *);
void (*free)(struct kgsl_memdesc *memdesc);
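
Since vmflags is now a plain unsigned int rather than a callback, kgsl_mmap() can OR it into vma->vm_flags directly (see the kgsl.c hunk above). A sketch of an ops instance in the new form; the particular flag choice here is illustrative, as each allocator in the driver supplies its own:

    #include <linux/mm.h>

    static const struct kgsl_memdesc_ops example_ops = {
            .vmflags = VM_DONTDUMP | VM_DONTEXPAND,
            /* .vmfault, .free as before */
    };

    /* mmap path, with the indirection removed:         */
    /*     vma->vm_flags |= entry->memdesc.ops->vmflags; */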
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index a484e487849..074be149484 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011,2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -149,15 +149,4 @@ static inline int kgsl_cff_dump_enable_get(void *data, u64 *val)
}
#endif /* CONFIG_MSM_KGSL_CFF_DUMP */
-
-/*
- * kgsl_cff_core_idle() - Idle the device if CFF is on
- * @device: Device whose idle fuunction is called
- */
-static inline void kgsl_cff_core_idle(struct kgsl_device *device)
-{
- if (device->cff_dump_enable)
- device->ftbl->idle(device);
-}
-
#endif /* __KGSL_CFFDUMP_H */
diff --git a/drivers/gpu/msm/kgsl_compat.c b/drivers/gpu/msm/kgsl_compat.c
index b72372275ef..b035d28df97 100644
--- a/drivers/gpu/msm/kgsl_compat.c
+++ b/drivers/gpu/msm/kgsl_compat.c
@@ -287,58 +287,50 @@ static long kgsl_ioctl_timestamp_event_compat(struct kgsl_device_private
static const struct kgsl_ioctl kgsl_compat_ioctl_funcs[] = {
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY_COMPAT,
- kgsl_ioctl_device_getproperty_compat,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_getproperty_compat),
/* IOCTL_KGSL_DEVICE_WAITTIMESTAMP is no longer supported */
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
- kgsl_ioctl_device_waittimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_waittimestamp_ctxtid),
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS_COMPAT,
- kgsl_ioctl_rb_issueibcmds_compat, 0),
+ kgsl_ioctl_rb_issueibcmds_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS_COMPAT,
- kgsl_ioctl_submit_commands_compat, 0),
+ kgsl_ioctl_submit_commands_compat),
/* IOCTL_KGSL_CMDSTREAM_READTIMESTAMP is no longer supported */
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
- kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_readtimestamp_ctxtid),
/* IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP is no longer supported */
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID_COMPAT,
- kgsl_ioctl_cmdstream_freememontimestamp_ctxtid_compat,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_freememontimestamp_ctxtid_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
- kgsl_ioctl_drawctxt_create,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_drawctxt_create),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
- kgsl_ioctl_drawctxt_destroy,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_drawctxt_destroy),
KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM_COMPAT,
- kgsl_ioctl_map_user_mem_compat, 0),
+ kgsl_ioctl_map_user_mem_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE_COMPAT,
- kgsl_ioctl_sharedmem_free_compat, 0),
+ kgsl_ioctl_sharedmem_free_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE_COMPAT,
- kgsl_ioctl_sharedmem_flush_cache_compat, 0),
+ kgsl_ioctl_sharedmem_flush_cache_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_COMPAT,
- kgsl_ioctl_gpumem_alloc_compat, 0),
+ kgsl_ioctl_gpumem_alloc_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM_COMPAT,
- kgsl_ioctl_cff_syncmem_compat, 0),
+ kgsl_ioctl_cff_syncmem_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
- kgsl_ioctl_cff_user_event, 0),
+ kgsl_ioctl_cff_user_event),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT_COMPAT,
- kgsl_ioctl_timestamp_event_compat,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_timestamp_event_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY_COMPAT,
- kgsl_ioctl_device_setproperty_compat,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_setproperty_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID_COMPAT,
- kgsl_ioctl_gpumem_alloc_id_compat, 0),
+ kgsl_ioctl_gpumem_alloc_id_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
- kgsl_ioctl_gpumem_free_id, 0),
+ kgsl_ioctl_gpumem_free_id),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO_COMPAT,
- kgsl_ioctl_gpumem_get_info_compat, 0),
+ kgsl_ioctl_gpumem_get_info_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_COMPAT,
- kgsl_ioctl_gpumem_sync_cache_compat, 0),
+ kgsl_ioctl_gpumem_sync_cache_compat),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK_COMPAT,
- kgsl_ioctl_gpumem_sync_cache_bulk_compat, 0),
+ kgsl_ioctl_gpumem_sync_cache_bulk_compat),
};
long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 8856fd9589e..9f616dde125 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -32,13 +32,9 @@
#define FIRST_TIMEOUT (HZ / 2)
-#define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \
+#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
- { .cmd = (_cmd), .func = (_func), .flags = (_flags) }
-
-#define KGSL_IOCTL_LOCK BIT(0)
-#define KGSL_IOCTL_WAKE BIT(1)
-
+ { .cmd = (_cmd), .func = (_func) }
/* KGSL device state is initialized to INIT when platform_probe *
* sucessfully initialized the device. Once a device has been opened *
@@ -158,7 +154,6 @@ typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
struct kgsl_ioctl {
unsigned int cmd;
kgsl_ioctl_func_t func;
- unsigned int flags;
};
long kgsl_ioctl_helper(struct file *filep, unsigned int cmd,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 909104475f6..7ebd2740842 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -38,6 +38,7 @@ static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
{ 0x10, 1 }, /* TTBR0 */
{ 0x14, 1 }, /* TTBR1 */
{ 0x20, 1 }, /* FSR */
+ { 0x28, 1 }, /* FAR */
{ 0x800, 1 }, /* TLBIALL */
{ 0x820, 1 }, /* RESUME */
{ 0x03C, 1 }, /* TLBLKCR */
@@ -55,6 +56,7 @@ static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
{ 0x20, 1 }, /* TTBR0 */
{ 0x28, 1 }, /* TTBR1 */
{ 0x58, 1 }, /* FSR */
+ { 0x60, 1 }, /* FAR_0 */
{ 0x618, 1 }, /* TLBIALL */
{ 0x008, 1 }, /* RESUME */
{ 0, 0 }, /* TLBLKCR not in V1 */
@@ -338,11 +340,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
device = mmu->device;
adreno_dev = ADRENO_DEVICE(device);
- if (atomic_read(&mmu->fault)) {
- if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
- ret = -EBUSY;
+ if (1 == atomic_cmpxchg(&mmu->fault, 0, 1))
goto done;
- }
iommu_dev = get_iommu_device(iommu_unit, dev);
if (!iommu_dev) {
@@ -453,36 +452,29 @@ done:
/*
* kgsl_iommu_disable_clk - Disable iommu clocks
* @mmu - Pointer to mmu structure
+ * @unit - Iommu unit
*
- * Disables iommu clocks
+ * Disables iommu clocks for an iommu unit
* Return - void
*/
-static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int unit)
{
struct kgsl_iommu *iommu = mmu->priv;
- struct msm_iommu_drvdata *iommu_drvdata;
int i, j;
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
- for (j = 0; j < iommu_unit->dev_count; j++) {
- if (ctx_id != iommu_unit->dev[j].ctx_id)
- continue;
- atomic_dec(&iommu_unit->dev[j].clk_enable_count);
- BUG_ON(
- atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0);
- /*
- * the clock calls have a refcount so call them on every
- * enable/disable call
- */
- iommu_drvdata = dev_get_drvdata(
- iommu_unit->dev[j].dev->parent);
- if (iommu_drvdata->aclk)
- clk_disable_unprepare(iommu_drvdata->aclk);
- if (iommu_drvdata->clk)
- clk_disable_unprepare(iommu_drvdata->clk);
- clk_disable_unprepare(iommu_drvdata->pclk);
- }
+
+ /* Turn off the clks for IOMMU unit requested */
+ if ((unit != i) && (unit != KGSL_IOMMU_MAX_UNITS))
+ continue;
+
+ atomic_dec(&iommu_unit->clk_enable_count);
+ BUG_ON(atomic_read(&iommu_unit->clk_enable_count) < 0);
+
+ for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
+ if (iommu_unit->clks[j])
+ clk_disable_unprepare(iommu_unit->clks[j]);
}
}
@@ -504,7 +496,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
{
struct kgsl_iommu_disable_clk_param *param = data;
- kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
+ kgsl_iommu_disable_clk(param->mmu, param->unit);
/* Free param we are done using it */
kfree(param);
@@ -517,8 +509,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
* @ts_valid - Indicates whether ts parameter is valid, if this parameter
* is false then it means that the calling function wants to disable the
* IOMMU clocks immediately without waiting for any timestamp
- * @ctx_id: Context id of the IOMMU context for which clocks are to be
- * turned off
+ * @unit: IOMMU unit for which clocks are to be turned off
*
* Creates an event to disable the IOMMU clocks on timestamp and if event
* already exists then updates the timestamp of disabling the IOMMU clocks
@@ -528,7 +519,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device,
*/
static void
kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
- unsigned int ts, int ctx_id)
+ unsigned int ts, int unit)
{
struct kgsl_iommu_disable_clk_param *param;
@@ -538,7 +529,7 @@ kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
return;
}
param->mmu = mmu;
- param->ctx_id = ctx_id;
+ param->unit = unit;
param->ts = ts;
if (kgsl_add_event(mmu->device, &mmu->device->iommu_events,
@@ -550,70 +541,65 @@ kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
}
/*
+ * kgsl_iommu_enable_clk_prepare_enable - Enable iommu clock
+ * @clk - clock to enable
+ *
+ * Prepare enables clock. Retries 3 times on enable failure, on 4th failure
+ * returns an error.
+ * Return: 0 on success else 1 on error
+ */
+
+static int kgsl_iommu_clk_prepare_enable(struct clk *clk)
+{
+ int num_retries = 4;
+
+ while (num_retries--) {
+ if (!clk_prepare_enable(clk))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
* kgsl_iommu_enable_clk - Enable iommu clocks
* @mmu - Pointer to mmu structure
- * @ctx_id - The context bank whose clocks are to be turned on
+ * @unit - The iommu unit whose clocks are to be turned on
*
- * Enables iommu clocks of a given context
+ * Enables iommu clocks of a given iommu unit
* Return: 0 on success else error code
*/
-static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
- int ctx_id)
+static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
+ int unit)
{
- int ret = 0;
int i, j;
struct kgsl_iommu *iommu = mmu->priv;
- struct msm_iommu_drvdata *iommu_drvdata;
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
- for (j = 0; j < iommu_unit->dev_count; j++) {
- if (ctx_id != iommu_unit->dev[j].ctx_id)
- continue;
- iommu_drvdata =
- dev_get_drvdata(iommu_unit->dev[j].dev->parent);
- ret = clk_prepare_enable(iommu_drvdata->pclk);
- if (ret)
- goto done;
- if (iommu_drvdata->clk) {
- ret = clk_prepare_enable(iommu_drvdata->clk);
- if (ret) {
- clk_disable_unprepare(
- iommu_drvdata->pclk);
- goto done;
- }
- }
- if (iommu_drvdata->aclk) {
- ret = clk_prepare_enable(iommu_drvdata->aclk);
- if (ret) {
- if (iommu_drvdata->clk)
- clk_disable_unprepare(
- iommu_drvdata->clk);
- clk_disable_unprepare(
- iommu_drvdata->pclk);
- goto done;
- }
- }
- atomic_inc(&iommu_unit->dev[j].clk_enable_count);
+
+ /* Turn on the clks for IOMMU unit requested */
+ if ((unit != i) && (unit != KGSL_IOMMU_MAX_UNITS))
+ continue;
+
+ for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
+ if (iommu_unit->clks[j])
+ if (kgsl_iommu_clk_prepare_enable(
+ iommu_unit->clks[j]))
+ goto done;
}
+ atomic_inc(&iommu_unit->clk_enable_count);
}
+ return;
done:
- if (ret) {
- struct kgsl_iommu_unit *iommu_unit;
- if (iommu->unit_count == i)
- i--;
- iommu_unit = &iommu->iommu_units[i];
- do {
- for (j--; j >= 0; j--)
- kgsl_iommu_disable_clk(mmu, ctx_id);
- i--;
- if (i >= 0) {
- iommu_unit = &iommu->iommu_units[i];
- j = iommu_unit->dev_count;
- }
- } while (i >= 0);
- }
- return ret;
+ /*
+ * Any Clock enable failure should be fatal,
+ * System usually crashes when enabling clock fails
+ * BUG_ON here to catch the system in bad state for
+ * further debug
+ */
+ KGSL_CORE_ERR("IOMMU clk enable failed\n");
+ BUG();
}
/*
@@ -775,6 +761,7 @@ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct kgsl_iommu_pt *iommu_pt;
struct kgsl_iommu *iommu = mmu->priv;
+ struct msm_iommu_drvdata *drvdata = 0;
int i, j, ret = 0;
/*
@@ -806,6 +793,14 @@ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
"iommu pt %p attached to dev %p, ctx_id %d\n",
iommu_pt->domain, iommu_unit->dev[j].dev,
iommu_unit->dev[j].ctx_id);
+ /* Init IOMMU unit clks here */
+ if (!drvdata) {
+ drvdata = dev_get_drvdata(
+ iommu_unit->dev[j].dev->parent);
+ iommu_unit->clks[0] = drvdata->pclk;
+ iommu_unit->clks[1] = drvdata->clk;
+ iommu_unit->clks[2] = drvdata->aclk;
+ }
}
}
}
@@ -848,9 +843,7 @@ static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
ret = -EINVAL;
goto done;
}
- atomic_set(
- &(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
- 0);
+ atomic_set(&(iommu_unit->clk_enable_count), 0);
iommu_unit->dev[iommu_unit->dev_count].dev =
msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -1560,17 +1553,9 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
mmu->hwpagetable = NULL;
goto done;
}
- status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- if (status) {
- KGSL_CORE_ERR("clk enable failed\n");
- goto done;
- }
- status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
- if (status) {
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- KGSL_CORE_ERR("clk enable failed\n");
- goto done;
- }
+
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+
/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
* that value should not change when we change pagetables, so while
* changing pagetables we can use this lsb value of the pagetable w/o
@@ -1613,8 +1598,7 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
KGSL_IOMMU_SETSTATE_NOP_OFFSET,
cp_nop_packet(1), sizeof(unsigned int));
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
@@ -1760,12 +1744,12 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
int i, j;
if (atomic_read(&mmu->fault)) {
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit =
&iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (iommu_unit->dev[j].fault) {
- kgsl_iommu_enable_clk(mmu, j);
_iommu_lock(iommu);
KGSL_IOMMU_SET_CTX_REG(iommu,
iommu_unit,
@@ -1775,12 +1759,12 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
iommu_unit,
iommu_unit->dev[j].ctx_id,
FSR, 0);
- kgsl_iommu_disable_clk(mmu, j);
_iommu_unlock(iommu);
iommu_unit->dev[j].fault = 0;
}
}
}
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
atomic_set(&mmu->fault, 0);
}
}
@@ -1855,11 +1839,11 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
if (in_interrupt())
return 0;
/* Return the current pt base by reading IOMMU pt_base register */
- kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- pt_base = KGSL_IOMMU_GET_CTX_REG_TTBR0(iommu,
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+ pt_base = KGSL_IOMMU_GET_CTX_REG_Q(iommu,
(&iommu->iommu_units[0]),
- KGSL_IOMMU_CONTEXT_USER);
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ KGSL_IOMMU_CONTEXT_USER, TTBR0);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
@@ -1886,11 +1870,7 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
mmu->hwpagetable);
uint64_t pt_val;
- ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- if (ret) {
- KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- return ret;
- }
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
if (msm_soc_version_supports_iommu_v0()) {
@@ -1967,7 +1947,8 @@ unlock:
_iommu_unlock(iommu);
/* Disable smmu clock */
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+
return ret;
}
@@ -2037,24 +2018,12 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
if (msm_soc_version_supports_iommu_v0())
return ret;
- ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
-
- if (ret) {
- KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- return ret;
- }
- ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
- if (ret) {
- KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- return ret;
- }
/* Need to idle device before changing options */
ret = mmu->device->ftbl->idle(mmu->device);
if (ret) {
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
return ret;
}
@@ -2077,11 +2046,51 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
SCTLR, sctlr_val);
}
}
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
- kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
return ret;
}
+/**
+ * kgsl_iommu_set_pagefault() - Check whether an IOMMU device has faulted
+ * @mmu: MMU pointer of the device
+ *
+ * This function is called to set the pagefault bit for the device so
+ * that recovery can take the pagefault into consideration
+ */
+static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
+{
+ int i, j;
+ struct kgsl_iommu *iommu = mmu->priv;
+ unsigned int fsr;
+
+	/* If a fault was already detected, return early */
+ if (atomic_read(&mmu->fault))
+ return;
+
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+
+ /* Loop through all IOMMU devices to check for fault */
+ for (i = 0; i < iommu->unit_count; i++) {
+ for (j = 0; j < iommu->iommu_units[i].dev_count; j++) {
+ fsr = KGSL_IOMMU_GET_CTX_REG(iommu,
+ (&(iommu->iommu_units[i])),
+ iommu->iommu_units[i].dev[j].ctx_id, FSR);
+ if (fsr) {
+ unsigned long far =
+ KGSL_IOMMU_GET_CTX_REG_FAR(iommu,
+ (&(iommu->iommu_units[i])),
+ iommu->iommu_units[i].dev[j].ctx_id);
+ kgsl_iommu_fault_handler(NULL,
+ iommu->iommu_units[i].dev[j].dev, far, 0, NULL);
+ break;
+ }
+ }
+ }
+
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
+}
+
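Note: a hypothetical call-site sketch for the new hook (the real caller sits outside this file; handle_pagefault_recovery() is a placeholder, not a kernel or KGSL symbol):

	kgsl_mmu_set_pagefault(&device->mmu);      /* latch any pending FSR */
	if (atomic_read(&device->mmu.fault))
		handle_pagefault_recovery(device); /* hypothetical helper */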
struct kgsl_mmu_ops iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
@@ -2105,6 +2114,7 @@ struct kgsl_mmu_ops iommu_ops = {
.mmu_sync_lock = kgsl_iommu_sync_lock,
.mmu_sync_unlock = kgsl_iommu_sync_unlock,
.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
+ .mmu_set_pagefault = kgsl_iommu_set_pagefault
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 92424a3f9e4..5496cb8b8e2 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -76,6 +76,7 @@ enum kgsl_iommu_reg_map {
KGSL_IOMMU_CTX_TTBR0,
KGSL_IOMMU_CTX_TTBR1,
KGSL_IOMMU_CTX_FSR,
+ KGSL_IOMMU_CTX_FAR,
KGSL_IOMMU_CTX_TLBIALL,
KGSL_IOMMU_CTX_RESUME,
KGSL_IOMMU_CTX_TLBLKCR,
@@ -97,10 +98,16 @@ struct kgsl_iommu_register_list {
* Max number of iommu units that the gpu core can have
* On APQ8064, KGSL can control a maximum of 2 IOMMU units.
*/
-#define KGSL_IOMMU_MAX_UNITS 2
+enum kgsl_iommu_units {
+ KGSL_IOMMU_UNIT_0 = 0,
+ KGSL_IOMMU_UNIT_1 = 1,
+ KGSL_IOMMU_MAX_UNITS = 2,
+};
/* Max number of iommu contexts per IOMMU unit */
#define KGSL_IOMMU_MAX_DEVS_PER_UNIT 2
+/* Max number of iommu clks per IOMMU unit */
+#define KGSL_IOMMU_MAX_CLKS 4
/* Macros to read/write IOMMU registers */
#define KGSL_IOMMU_SET_CTX_REG_Q(iommu, iommu_unit, ctx, REG, val) \
@@ -136,11 +143,21 @@ struct kgsl_iommu_register_list {
KGSL_IOMMU_GET_CTX_REG_Q(iommu, iommu_unit, ctx, TTBR0)
#define KGSL_IOMMU_SET_CTX_REG_TTBR0(iommu, iommu_unit, ctx, val) \
KGSL_IOMMU_SET_CTX_REG_Q(iommu, iommu_unit, ctx, TTBR0, val)
+
+#define KGSL_IOMMU_GET_CTX_REG_FAR(iommu, iommu_unit, ctx) \
+ KGSL_IOMMU_GET_CTX_REG_Q(iommu, iommu_unit, ctx, FAR)
+#define KGSL_IOMMU_SET_CTX_REG_FAR(iommu, iommu_unit, ctx, val) \
+	KGSL_IOMMU_SET_CTX_REG_Q(iommu, iommu_unit, ctx, FAR, val)
#else
#define KGSL_IOMMU_GET_CTX_REG_TTBR0(iommu, iommu_unit, ctx) \
KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, ctx, TTBR0)
#define KGSL_IOMMU_SET_CTX_REG_TTBR0(iommu, iommu_unit, ctx, val) \
KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, ctx, TTBR0, val)
+
+#define KGSL_IOMMU_GET_CTX_REG_FAR(iommu, iommu_unit, ctx) \
+ KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, ctx, FAR)
+#define KGSL_IOMMU_SET_CTX_REG_FAR(iommu, iommu_unit, ctx, val) \
+	KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, ctx, FAR, val)
#endif
/* Gets the lsb value of pagetable */
@@ -161,7 +178,6 @@ struct kgsl_iommu_register_list {
* are on, else the clocks are off
 * @fault: Flag that, when set, indicates this iommu device has caused a
 * page fault
- * @clk_enable_count: The ref count of clock enable calls
*/
struct kgsl_iommu_device {
struct device *dev;
@@ -171,7 +187,6 @@ struct kgsl_iommu_device {
bool clk_enabled;
struct kgsl_device *kgsldev;
int fault;
- atomic_t clk_enable_count;
};
/*
@@ -187,6 +202,8 @@ struct kgsl_iommu_device {
* @iommu_halt_enable: Valid only on IOMMU-v1, when set indicates that the iommu
* unit supports halting of the IOMMU, which can be enabled while programming
* the IOMMU registers for synchronization
+ * @clk_enable_count: The ref count of clock enable calls
+ * @clks: iommu unit clks
*/
struct kgsl_iommu_unit {
struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
@@ -194,6 +211,8 @@ struct kgsl_iommu_unit {
struct kgsl_memdesc reg_map;
unsigned int ahb_base;
int iommu_halt_enable;
+ atomic_t clk_enable_count;
+ struct clk *clks[KGSL_IOMMU_MAX_CLKS];
};
/*
@@ -243,13 +262,13 @@ struct kgsl_iommu_pt {
 * struct kgsl_iommu_disable_clk_param - Parameter struct for disable clk event
* @mmu: The mmu pointer
 * @rb_level: the rb level to which the event timestamp belongs
- * @ctx_id: The IOMMU context whose clock is to be turned off
+ * @unit: The IOMMU unit whose clock is to be turned off
* @ts: Timestamp on which clock is to be disabled
*/
struct kgsl_iommu_disable_clk_param {
struct kgsl_mmu *mmu;
int rb_level;
- int ctx_id;
+ int unit;
unsigned int ts;
};
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 63392bc2046..4ff9d944c05 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -242,8 +242,17 @@ static int kgsl_setup_pt(struct kgsl_pagetable *pt)
return 0;
}
-static void _kgsl_destroy_pagetable(struct kgsl_pagetable *pagetable)
+static void kgsl_destroy_pagetable(struct kref *kref)
{
+ struct kgsl_pagetable *pagetable = container_of(kref,
+ struct kgsl_pagetable, refcount);
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_del(&pagetable->list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
pagetable_remove_sysfs_objects(pagetable);
kgsl_unmap_global_pt_entries(pagetable);
@@ -259,29 +268,6 @@ static void _kgsl_destroy_pagetable(struct kgsl_pagetable *pagetable)
kfree(pagetable);
}
-static void kgsl_destroy_pagetable(struct kref *kref)
-{
- struct kgsl_pagetable *pagetable = container_of(kref,
- struct kgsl_pagetable, refcount);
- unsigned long flags;
-
- spin_lock_irqsave(&kgsl_driver.ptlock, flags);
- list_del(&pagetable->list);
- spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
- _kgsl_destroy_pagetable(pagetable);
-}
-
-static void kgsl_destroy_pagetable_locked(struct kref *kref)
-{
- struct kgsl_pagetable *pagetable = container_of(kref,
- struct kgsl_pagetable, refcount);
-
- list_del(&pagetable->list);
-
- _kgsl_destroy_pagetable(pagetable);
-}
-
static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
if (pagetable)
@@ -296,12 +282,9 @@ kgsl_get_pagetable(unsigned long name)
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kref_get_unless_zero(&pt->refcount)) {
- if (pt->name == name) {
- ret = pt;
- break;
- }
- kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
+ if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
+ ret = pt;
+ break;
}
}
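Note: checking pt->name before kref_get_unless_zero() means the walk no longer takes and drops a reference on every non-matching entry, so the final kref_put() can never run under ptlock and the _locked destroy variant removed above becomes unnecessary. A sketch of the caller pattern this preserves (use_pagetable() is a placeholder):

	struct kgsl_pagetable *pt = kgsl_get_pagetable(name);

	if (pt) {
		use_pagetable(pt);      /* placeholder for real work */
		kgsl_put_pagetable(pt); /* may unlink and free safely */
	}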
@@ -448,14 +431,9 @@ kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, phys_addr_t pt_base)
return KGSL_MMU_GLOBAL_PT;
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kref_get_unless_zero(&pt->refcount)) {
- if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
- ptid = (int) pt->name;
- kref_put(&pt->refcount,
- kgsl_destroy_pagetable_locked);
- break;
- }
- kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ ptid = (int) pt->name;
+ break;
}
}
spin_unlock(&kgsl_driver.ptlock);
@@ -475,23 +453,16 @@ kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, phys_addr_t pt_base,
return 0;
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kref_get_unless_zero(&pt->refcount)) {
- if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
- if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
- ret = 1;
- kref_put(&pt->refcount,
- kgsl_destroy_pagetable_locked);
- break;
- } else {
- pt->fault_addr =
- (addr & ~(PAGE_SIZE-1));
- ret = 0;
- kref_put(&pt->refcount,
- kgsl_destroy_pagetable_locked);
- break;
- }
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
+ ret = 1;
+ break;
+ } else {
+ pt->fault_addr =
+ (addr & ~(PAGE_SIZE-1));
+ ret = 0;
+ break;
}
- kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
}
}
spin_unlock(&kgsl_driver.ptlock);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index ea94a579c93..72200851bd9 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -91,11 +91,11 @@ struct kgsl_mmu_ops {
(struct kgsl_mmu *mmu);
void (*mmu_disable_clk_on_ts)
(struct kgsl_mmu *mmu,
- uint32_t ts, int ctx_id);
- int (*mmu_enable_clk)
- (struct kgsl_mmu *mmu, int ctx_id);
+ uint32_t ts, int unit);
+ void (*mmu_enable_clk)
+ (struct kgsl_mmu *mmu, int unit);
void (*mmu_disable_clk)
- (struct kgsl_mmu *mmu, int ctx_id);
+ (struct kgsl_mmu *mmu, int unit);
uint64_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -117,6 +117,7 @@ struct kgsl_mmu_ops {
(struct kgsl_mmu *mmu, unsigned int *cmds);
int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu, int iommu_unit_num);
int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned int pf_policy);
+ void (*mmu_set_pagefault)(struct kgsl_mmu *mmu);
};
struct kgsl_mmu_pt_ops {
@@ -259,27 +260,25 @@ static inline phys_addr_t kgsl_mmu_get_default_ttbr0(struct kgsl_mmu *mmu,
return 0;
}
-static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
- int ctx_id)
+static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu, int unit)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
-		return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
-	else
-		return 0;
+		mmu->mmu_ops->mmu_enable_clk(mmu, unit);
}
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int unit)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
- mmu->mmu_ops->mmu_disable_clk(mmu, ctx_id);
+ mmu->mmu_ops->mmu_disable_clk(mmu, unit);
}
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
- unsigned int ts,
- int ctx_id)
+ unsigned int ts, int unit)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
- mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ctx_id);
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, unit);
}
static inline unsigned int kgsl_mmu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
@@ -394,4 +393,10 @@ static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu,
return 0;
}
+static inline void kgsl_mmu_set_pagefault(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pagefault)
+		mmu->mmu_ops->mmu_set_pagefault(mmu);
+}
+
#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 83bf20b149a..c09772f6600 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -510,7 +510,7 @@ static ssize_t kgsl_pwrctrl_idle_timer_show(struct device *dev,
device->pwrctrl.interval_timeout * mul);
}
-static ssize_t kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
+static ssize_t kgsl_pwrctrl_pmqos_active_latency_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -526,13 +526,13 @@ static ssize_t kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
return ret;
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
- device->pwrctrl.pm_qos_latency = val;
+ device->pwrctrl.pm_qos_active_latency = val;
kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return count;
}
-static ssize_t kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
+static ssize_t kgsl_pwrctrl_pmqos_active_latency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -540,7 +540,7 @@ static ssize_t kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n",
- device->pwrctrl.pm_qos_latency);
+ device->pwrctrl.pm_qos_active_latency);
}
static ssize_t kgsl_pwrctrl_gpubusy_show(struct device *dev,
@@ -820,9 +820,9 @@ static DEVICE_ATTR(thermal_pwrlevel, 0644,
static DEVICE_ATTR(num_pwrlevels, 0444,
kgsl_pwrctrl_num_pwrlevels_show,
NULL);
-static DEVICE_ATTR(pmqos_latency, 0644,
- kgsl_pwrctrl_pmqos_latency_show,
- kgsl_pwrctrl_pmqos_latency_store);
+static DEVICE_ATTR(pmqos_active_latency, 0644,
+ kgsl_pwrctrl_pmqos_active_latency_show,
+ kgsl_pwrctrl_pmqos_active_latency_store);
static DEVICE_ATTR(reset_count, 0444,
kgsl_pwrctrl_reset_count_show,
NULL);
@@ -853,7 +853,7 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_min_pwrlevel,
&dev_attr_thermal_pwrlevel,
&dev_attr_num_pwrlevels,
- &dev_attr_pmqos_latency,
+ &dev_attr_pmqos_active_latency,
&dev_attr_reset_count,
&dev_attr_force_clk_on,
&dev_attr_force_bus_on,
@@ -1130,7 +1130,8 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->interval_timeout = pdata->idle_timeout;
pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
- pwr->pm_qos_latency = pdata->pm_qos_latency;
+ pwr->pm_qos_active_latency = pdata->pm_qos_active_latency;
+ pwr->pm_qos_wakeup_latency = pdata->pm_qos_wakeup_latency;
pm_runtime_enable(device->parentdev);
@@ -1545,8 +1546,6 @@ int kgsl_pwrctrl_wake(struct kgsl_device *device, int priority)
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
mod_timer(&device->idle_timer, jiffies +
device->pwrctrl.interval_timeout);
- pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
- device->pwrctrl.pm_qos_latency);
case KGSL_STATE_ACTIVE:
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
break;
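Note: with the vote dropped from kgsl_pwrctrl_wake(), the active-latency request is presumably issued elsewhere on the active path (the new call site is outside this hunk). The pm_qos pairing it relies on, as a sketch:

	pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			      device->pwrctrl.pm_qos_active_latency);
	/* ... GPU is active ... */
	pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			      PM_QOS_DEFAULT_VALUE); /* drop the vote */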
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 522f26fdef3..90fd6f3287c 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -61,6 +61,7 @@ struct kgsl_pwr_constraint {
} pwrlevel;
} hint;
unsigned long expires;
+ uint32_t owner_id;
};
/**
@@ -84,7 +85,8 @@ struct kgsl_pwr_constraint {
* @irq_name - resource name for the IRQ
* @clk_stats - structure of clock statistics
* @pm_qos_req_dma - the power management quality of service structure
- * @pm_qos_latency - allowed CPU latency in microseconds
+ * @pm_qos_active_latency - allowed CPU latency in microseconds when active
+ * @pm_qos_wakeup_latency - allowed CPU latency in microseconds during wakeup
* @bus_control - true if the bus calculation is independent
* @bus_mod - modifier from the current power level for the bus vote
* @bus_index - default bus index into the bus_ib table
@@ -114,7 +116,8 @@ struct kgsl_pwrctrl {
const char *irq_name;
struct kgsl_clk_stats clk_stats;
struct pm_qos_request pm_qos_req_dma;
- unsigned int pm_qos_latency;
+ unsigned int pm_qos_active_latency;
+ unsigned int pm_qos_wakeup_latency;
unsigned int irq_last;
bool bus_control;
int bus_mod;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index b7db01e3123..9e5517cf9be 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -389,18 +389,6 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
return VM_FAULT_SIGBUS;
}
-static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
-{
-
- return VM_DONTDUMP | VM_DONTEXPAND;
-}
-
-static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
-{
-
- return VM_DONTDUMP | VM_PFNMAP | VM_DONTEXPAND;
-}
-
/*
* kgsl_page_alloc_unmap_kernel() - Unmap the memory in memdesc
*
@@ -536,7 +524,7 @@ static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
/* Global - also used by kgsl_drm.c */
static struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
.free = kgsl_page_alloc_free,
- .vmflags = kgsl_page_alloc_vmflags,
+ .vmflags = VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY,
.vmfault = kgsl_page_alloc_vmfault,
.map_kernel = kgsl_page_alloc_map_kernel,
.unmap_kernel = kgsl_page_alloc_unmap_kernel,
@@ -545,7 +533,7 @@ static struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
/* CMA ops - used during NOMMU mode */
static struct kgsl_memdesc_ops kgsl_cma_ops = {
.free = kgsl_cma_coherent_free,
- .vmflags = kgsl_contiguous_vmflags,
+ .vmflags = VM_DONTDUMP | VM_PFNMAP | VM_DONTEXPAND | VM_DONTCOPY,
.vmfault = kgsl_contiguous_vmfault,
};
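Note: vmflags turns from a callback into a plain flag mask (gaining VM_DONTCOPY along the way). The consumer side, assumed to live in the kgsl mmap path, then reduces to something like:

	if (memdesc->ops && memdesc->ops->vmflags)
		vma->vm_flags |= memdesc->ops->vmflags;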
@@ -564,6 +552,10 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, size_t offset,
void *addr = (memdesc->hostptr) ?
memdesc->hostptr : (void *) memdesc->useraddr;
+ /* Make sure that size is non-zero */
+ if (!size)
+ return -EINVAL;
+
/* Check that offset+length does not exceed memdesc->size */
if ((offset + size) > memdesc->size)
return -ERANGE;
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index f0f436bea76..822b08308e9 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -123,6 +123,8 @@ int kgsl_add_fence_event(struct kgsl_device *device,
int ret = -EINVAL;
char fence_name[sizeof(fence->name)] = {};
+ priv.fence_fd = -1;
+
if (len != sizeof(priv))
return -EINVAL;
@@ -130,10 +132,12 @@ int kgsl_add_fence_event(struct kgsl_device *device,
if (event == NULL)
return -ENOMEM;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
context = kgsl_context_get_owner(owner, context_id);
if (context == NULL)
- goto fail_pt;
+ goto unlock;
event->context = context;
event->timestamp = timestamp;
@@ -142,7 +146,7 @@ int kgsl_add_fence_event(struct kgsl_device *device,
if (pt == NULL) {
KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
ret = -ENOMEM;
- goto fail_pt;
+ goto unlock;
}
snprintf(fence_name, sizeof(fence_name),
"%s-pid-%d-ctx-%d-ts-%d",
@@ -156,20 +160,24 @@ int kgsl_add_fence_event(struct kgsl_device *device,
kgsl_sync_pt_destroy(pt);
KGSL_DRV_ERR(device, "sync_fence_create failed\n");
ret = -ENOMEM;
- goto fail_fence;
+ goto unlock;
}
priv.fence_fd = get_unused_fd_flags(0);
if (priv.fence_fd < 0) {
- KGSL_DRV_ERR(device, "invalid fence fd\n");
- ret = -EINVAL;
- goto fail_fd;
+ KGSL_DRV_ERR(device, "Unable to get a file descriptor: %d\n",
+ priv.fence_fd);
+ ret = priv.fence_fd;
+ goto unlock;
}
sync_fence_install(fence, priv.fence_fd);
+ /* Unlock the mutex before copying to user */
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
if (copy_to_user(data, &priv, sizeof(priv))) {
ret = -EFAULT;
- goto fail_copy_fd;
+ goto out;
}
/*
@@ -178,20 +186,22 @@ int kgsl_add_fence_event(struct kgsl_device *device,
*/
ret = kgsl_add_event(device, &context->events, timestamp,
kgsl_fence_event_cb, event);
+
if (ret)
- goto fail_event;
+ goto out;
return 0;
-fail_event:
-fail_copy_fd:
- /* clean up sync_fence_install */
- put_unused_fd(priv.fence_fd);
-fail_fd:
- /* clean up sync_fence_create */
- sync_fence_put(fence);
-fail_fence:
-fail_pt:
+unlock:
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
+out:
+ if (priv.fence_fd >= 0)
+ put_unused_fd(priv.fence_fd);
+
+ if (fence)
+ sync_fence_put(fence);
+
kgsl_context_put(context);
kfree(event);
return ret;
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index e9403a53b4e..b854c0a3309 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -407,7 +407,6 @@ struct mdss_mdp_pipe {
u8 blend_op;
u8 overfetch_disable;
u32 transp;
- u8 has_buf;
u32 bg_color;
u32 hscl_en;
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 10d17dc2fe9..8bb4ea82600 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1378,16 +1378,23 @@ static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_ctl *split_ctl)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_overlay_private *mdp5_data = NULL;
+ bool mixer_swap = false;
if (!ctl || !split_ctl || !mdata)
return -ENODEV;
+ if (ctl->mfd) {
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+ mixer_swap = mdp5_data->mixer_swap;
+ }
+
/* setup split ctl mixer as right mixer of original ctl so that
* original ctl can work the same way as dual pipe solution */
ctl->mixer_right = split_ctl->mixer_left;
if ((mdata->mdp_rev >= MDSS_MDP_HW_REV_103) && ctl->is_video_mode)
- ctl->split_flush_en = true;
+ ctl->split_flush_en = !mixer_swap;
return 0;
}
@@ -2016,14 +2023,20 @@ int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int power_state)
mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
- if (ctl->stop_fnc)
+ if (ctl->stop_fnc) {
ret = ctl->stop_fnc(ctl, power_state);
- else
+ if (ctl->panel_data->panel_info.fbc.enabled)
+ mdss_mdp_ctl_fbc_enable(0, ctl->mixer_left,
+ &ctl->panel_data->panel_info);
+ } else {
pr_warn("no stop func for ctl=%d\n", ctl->num);
+ }
if (sctl && sctl->stop_fnc) {
ret = sctl->stop_fnc(sctl, power_state);
- mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
+ if (ctl->panel_data->panel_info.fbc.enabled)
+ mdss_mdp_ctl_fbc_enable(0, sctl->mixer_left,
+ &sctl->panel_data->panel_info);
}
if (ret) {
pr_warn("error powering off intf ctl=%d\n", ctl->num);
@@ -2035,6 +2048,9 @@ int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int power_state)
goto end;
}
+ if (sctl)
+ mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
+
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
if (sctl)
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index 6da8fdb1bb9..41fe0966b3b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -39,8 +39,8 @@ struct mdss_mdp_cmd_ctx {
#endif
u32 pp_num;
u8 ref_cnt;
- struct completion pp_comp;
struct completion stop_comp;
+ wait_queue_head_t pp_waitq;
struct list_head vsync_handlers;
int panel_power_state;
atomic_t koff_cnt;
@@ -59,15 +59,22 @@ struct mdss_mdp_cmd_ctx {
atomic_t pp_done_cnt;
struct mdss_panel_recovery recovery;
struct mdss_mdp_cmd_ctx *sync_ctx; /* for left + right, partial update */
+ u32 pp_timeout_report_cnt;
};
struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx);
-static bool __mdss_mdp_cmd_panel_power_on(struct mdss_mdp_cmd_ctx *ctx)
+static bool __mdss_mdp_cmd_is_panel_power_off(struct mdss_mdp_cmd_ctx *ctx)
{
- return (ctx->panel_power_state == MDSS_PANEL_POWER_ON);
+ return mdss_panel_is_power_off(ctx->panel_power_state);
+}
+
+static bool __mdss_mdp_cmd_is_panel_power_on_interactive(
+ struct mdss_mdp_cmd_ctx *ctx)
+{
+ return mdss_panel_is_power_on_interactive(ctx->panel_power_state);
}
static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
@@ -120,86 +127,89 @@ exit:
static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_mixer *mixer)
+ struct mdss_mdp_mixer *mixer, bool enable)
{
- struct mdss_mdp_pp_tear_check *te;
+ struct mdss_mdp_pp_tear_check *te = NULL;
struct mdss_panel_info *pinfo;
- u32 vsync_clk_speed_hz, total_lines, vclks_line, cfg;
+ u32 vsync_clk_speed_hz, total_lines, vclks_line, cfg = 0;
if (IS_ERR_OR_NULL(ctl->panel_data)) {
pr_err("no panel data\n");
return -ENODEV;
}
- pinfo = &ctl->panel_data->panel_info;
- te = &ctl->panel_data->panel_info.te;
+ if (enable) {
+ pinfo = &ctl->panel_data->panel_info;
+ te = &ctl->panel_data->panel_info.te;
- mdss_mdp_vsync_clk_enable(1);
+ mdss_mdp_vsync_clk_enable(1);
- vsync_clk_speed_hz =
- mdss_mdp_get_clk_rate(MDSS_CLK_MDP_VSYNC);
+ vsync_clk_speed_hz =
+ mdss_mdp_get_clk_rate(MDSS_CLK_MDP_VSYNC);
- total_lines = mdss_panel_get_vtotal(pinfo);
+ total_lines = mdss_panel_get_vtotal(pinfo);
- total_lines *= pinfo->mipi.frame_rate;
+ total_lines *= pinfo->mipi.frame_rate;
- vclks_line = (total_lines) ? vsync_clk_speed_hz / total_lines : 0;
+ vclks_line = (total_lines) ? vsync_clk_speed_hz/total_lines : 0;
- cfg = BIT(19);
- pr_debug("%s : cfg1 = %d\n", __func__, cfg);
- if (pinfo->mipi.hw_vsync_mode) {
- cfg |= BIT(20);
- pr_debug("%s : cfg2 = %d\n", __func__, cfg);
- }
+ cfg = BIT(19);
+ if (pinfo->mipi.hw_vsync_mode)
+ cfg |= BIT(20);
- if (te->refx100)
- vclks_line = vclks_line * pinfo->mipi.frame_rate *
- 100 / te->refx100;
- else {
- pr_warn("refx100 cannot be zero! Use 6000 as default\n");
- vclks_line = vclks_line * pinfo->mipi.frame_rate *
- 100 / 6000;
- }
+ if (te->refx100)
+ vclks_line = vclks_line * pinfo->mipi.frame_rate *
+ 100 / te->refx100;
+ else {
+ pr_warn("refx100 cannot be zero! Use 6000 as default\n");
+ vclks_line = vclks_line * pinfo->mipi.frame_rate *
+ 100 / 6000;
+ }
- cfg |= vclks_line;
+ cfg |= vclks_line;
- pr_debug("%s: cfg=%d yres=%d vclks=%x height=%d init=%d rd=%d start=%d ",
- __func__, cfg, pinfo->yres, vclks_line, te->sync_cfg_height,
- te->vsync_init_val, te->rd_ptr_irq, te->start_pos);
- pr_debug("thrd_start =%d thrd_cont=%d tear_ckeck_en=%d\n",
- te->sync_threshold_start, te->sync_threshold_continue,
- te->tear_check_en);
+ pr_debug("%s: yres=%d vclks=%x height=%d init=%d rd=%d start=%d ",
+ __func__, pinfo->yres, vclks_line, te->sync_cfg_height,
+ te->vsync_init_val, te->rd_ptr_irq, te->start_pos);
+ pr_debug("thrd_start =%d thrd_cont=%d\n",
+ te->sync_threshold_start, te->sync_threshold_continue);
+ }
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
- te->sync_cfg_height);
+ te ? te->sync_cfg_height : 0);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_VSYNC_INIT_VAL,
- te->vsync_init_val);
+ te ? te->vsync_init_val : 0);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_RD_PTR_IRQ,
- te->rd_ptr_irq);
+ te ? te->rd_ptr_irq : 0);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_START_POS,
- te->start_pos);
+ te ? te->start_pos : 0);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_THRESH,
- ((te->sync_threshold_continue << 16) |
- te->sync_threshold_start));
+ te ? ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start) : 0);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_TEAR_CHECK_EN,
- te->tear_check_en);
+ te ? te->tear_check_en : 0);
+
return 0;
}
-static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_ctl *ctl)
+static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_ctl *ctl, bool enable)
{
- struct mdss_mdp_mixer *mixer;
int rc = 0;
+ struct mdss_mdp_mixer *mixer;
+
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (mixer) {
- rc = mdss_mdp_cmd_tearcheck_cfg(ctl, mixer);
+ rc = mdss_mdp_cmd_tearcheck_cfg(ctl, mixer, enable);
if (rc)
goto err;
}
- mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
- if (mixer)
- rc = mdss_mdp_cmd_tearcheck_cfg(ctl, mixer);
+
+ if (!(ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE)) {
+ mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+ if (mixer)
+ rc = mdss_mdp_cmd_tearcheck_cfg(ctl, mixer, enable);
+ }
err:
return rc;
}
@@ -210,7 +220,7 @@ static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx)
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int irq_en;
- if (!__mdss_mdp_cmd_panel_power_on(ctx))
+ if (__mdss_mdp_cmd_is_panel_power_off(ctx))
return;
mutex_lock(&ctx->clk_mtx);
@@ -390,7 +400,6 @@ static void mdss_mdp_cmd_underflow_recovery(void *data)
atomic_dec(&ctx->koff_cnt);
mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP,
ctx->pp_num);
- complete_all(&ctx->pp_comp);
}
spin_unlock_irqrestore(&ctx->koff_lock, flags);
}
@@ -419,25 +428,24 @@ static void mdss_mdp_cmd_pingpong_done(void *arg)
spin_lock(&ctx->koff_lock);
mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
- complete_all(&ctx->pp_comp);
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled,
ctx->rdptr_enabled);
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
xlog(__func__, ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
#endif
- if (atomic_read(&ctx->koff_cnt)) {
- if (atomic_dec_return(&ctx->koff_cnt)) {
+ if (atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
+ if (atomic_read(&ctx->koff_cnt))
pr_err("%s: too many kickoffs=%d!\n", __func__,
atomic_read(&ctx->koff_cnt));
- atomic_set(&ctx->koff_cnt, 0);
- }
if (mdss_mdp_cmd_do_notifier(ctx)) {
atomic_inc(&ctx->pp_done_cnt);
schedule_work(&ctx->pp_done_work);
}
- } else
+ wake_up_all(&ctx->pp_waitq);
+ } else {
pr_err("%s: should not have pingpong interrupt!\n", __func__);
+ }
pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d kcnt=%d\n", __func__,
ctl->num, ctl->intf_num, ctx->pp_num, atomic_read(&ctx->koff_cnt));
@@ -640,7 +648,6 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
struct mdss_mdp_cmd_ctx *ctx;
struct mdss_panel_data *pdata;
unsigned long flags;
- int need_wait = 0;
int rc = 0;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
@@ -651,11 +658,6 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
pdata = ctl->panel_data;
- spin_lock_irqsave(&ctx->koff_lock, flags);
- if (atomic_read(&ctx->koff_cnt) > 0)
- need_wait = 1;
- spin_unlock_irqrestore(&ctx->koff_lock, flags);
-
ctl->roi_bkup.w = ctl->width;
ctl->roi_bkup.h = ctl->height;
@@ -663,68 +665,62 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
ctx->rdptr_enabled, ctl->roi_bkup.w,
ctl->roi_bkup.h);
-#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
- xlog(__func__, ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
-#endif
- pr_debug("%s: need_wait=%d intf_num=%d ctx=%p\n",
- __func__, need_wait, ctl->intf_num, ctx);
-
- if (need_wait) {
- rc = wait_for_completion_timeout(
- &ctx->pp_comp, msecs_to_jiffies(200));
-
- if (rc <= 0) {
- u32 status, mask;
-
- mask = BIT(MDSS_MDP_IRQ_PING_PONG_COMP + ctx->pp_num);
- status = mask & readl_relaxed(ctl->mdata->mdp_base +
- MDSS_MDP_REG_INTR_STATUS);
-
- if (status || pdata->panel_info.panel_dead) {
- if (status)
- WARN(1, "pp done but irq not triggered\n");
- else if( pdata->panel_info.panel_dead)
- WARN(1, "panel dead: refresh condition\n");
-
- mdss_mdp_irq_clear(ctl->mdata,
- MDSS_MDP_IRQ_PING_PONG_COMP,
- ctx->pp_num);
- local_irq_save(flags);
- mdss_mdp_cmd_pingpong_done(ctl);
- local_irq_restore(flags);
- rc = 1;
- }
-
- if (status)
- rc = try_wait_for_completion(&ctx->pp_comp);
+ pr_debug("%s: intf_num=%d ctx=%p koff_cnt=%d\n", __func__,
+ ctl->intf_num, ctx, atomic_read(&ctx->koff_cnt));
+
+ rc = wait_event_timeout(ctx->pp_waitq,
+ atomic_read(&ctx->koff_cnt) == 0,
+ KOFF_TIMEOUT);
+
+ trace_mdp_cmd_wait_pingpong(ctl->num,
+ atomic_read(&ctx->koff_cnt));
+
+ if (rc <= 0) {
+ u32 status, mask;
+
+ mask = BIT(MDSS_MDP_IRQ_PING_PONG_COMP + ctx->pp_num);
+ status = mask & readl_relaxed(ctl->mdata->mdp_base +
+ MDSS_MDP_REG_INTR_STATUS);
+ if (status) {
+ WARN(1, "pp done but irq not triggered\n");
+ mdss_mdp_irq_clear(ctl->mdata,
+ MDSS_MDP_IRQ_PING_PONG_COMP,
+ ctx->pp_num);
+ local_irq_save(flags);
+ mdss_mdp_cmd_pingpong_done(ctl);
+ local_irq_restore(flags);
+ rc = 1;
}
- if (rc <= 0) {
+ rc = atomic_read(&ctx->koff_cnt) == 0;
+ }
+
+ if (rc <= 0) {
+ if (!ctx->pp_timeout_report_cnt) {
WARN(1, "cmd kickoff timed out (%d) ctl=%d\n",
- rc, ctl->num);
- mdss_dsi_debug_check_te(pdata);
+ rc, ctl->num);
MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0", "dsi1",
- "edp", "hdmi", "panic");
-#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
- dumpreg();
- mdp5_dump_regs();
- mdss_mdp_debug_bus();
- xlog_dump();
- pr_err("mdp clk rate=%ld\n", mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC));
- panic("Pingpong Timeout");
-#endif
- rc = -EPERM;
- mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
- } else {
- rc = 0;
+ "edp", "hdmi", "panic");
}
+ ctx->pp_timeout_report_cnt++;
+ rc = -EPERM;
+ mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+ atomic_add_unless(&ctx->koff_cnt, -1, 0);
+ } else {
+ rc = 0;
+ ctx->pp_timeout_report_cnt = 0;
}
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
xlog(__func__,ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled, ctx->rdptr_enabled, 0, rc);
#endif
+ /* signal any pending ping pong done events */
+ while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
+ mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
+
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled,
- ctx->rdptr_enabled, rc);
+ ctx->rdptr_enabled, rc);
+
return rc;
}
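Note: the per-frame completion is replaced by a wait queue keyed on koff_cnt. The pairing across kickoff, the pingpong-done interrupt, and this wait, summarized from the hunks above:

	/* kickoff:  atomic_inc(&ctx->koff_cnt);                          */
	/* pp done:  atomic_add_unless(&ctx->koff_cnt, -1, 0);            */
	/*           wake_up_all(&ctx->pp_waitq);                         */
	/* wait:     wait_event_timeout(ctx->pp_waitq,                    */
	/*               atomic_read(&ctx->koff_cnt) == 0, KOFF_TIMEOUT); */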
@@ -791,6 +787,38 @@ static int mdss_mdp_cmd_set_stream_size(struct mdss_mdp_ctl *ctl)
return rc;
}
+static int mdss_mdp_cmd_panel_on(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_ctl *sctl)
+{
+ struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+ int rc = 0;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return -ENODEV;
+ }
+
+ if (sctl)
+ sctx = (struct mdss_mdp_cmd_ctx *) sctl->priv_data;
+
+ if (!__mdss_mdp_cmd_is_panel_power_on_interactive(ctx)) {
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+ WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
+ rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL);
+ WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
+
+ ctx->panel_power_state = MDSS_PANEL_POWER_ON;
+ if (sctx)
+ sctx->panel_power_state = MDSS_PANEL_POWER_ON;
+ } else {
+ pr_debug("%s: panel already on\n", __func__);
+ }
+
+ return rc;
+}
+
/*
* There are 3 partial update possibilities
* left only ==> enable left pingpong_done
@@ -809,8 +837,6 @@ int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_ctl *sctl = NULL;
struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
- unsigned long flags;
- int rc;
ATRACE_BEGIN(__func__);
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
@@ -843,32 +869,22 @@ int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
PERF_HW_MDP_STATE, PERF_STATUS_BUSY);
}
- if (!__mdss_mdp_cmd_panel_power_on(ctx)) {
- rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
- WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
-
- rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL);
- WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
-
- ctx->panel_power_state = MDSS_PANEL_POWER_ON;
- if (sctx)
- sctx->panel_power_state = MDSS_PANEL_POWER_ON;
- }
+ /*
+	 * Turn on the panel if it is not on already. The panel is turned
+	 * on only when the first frame is sent, not during cmd start, to
+	 * ensure that no artifacts are seen on the panel.
+ */
+ if (__mdss_mdp_cmd_is_panel_power_off(ctx))
+ mdss_mdp_cmd_panel_on(ctl, sctl);
MDSS_XLOG(ctl->num, ctl->roi.x, ctl->roi.y, ctl->roi.w,
ctl->roi.h);
- spin_lock_irqsave(&ctx->koff_lock, flags);
atomic_inc(&ctx->koff_cnt);
- INIT_COMPLETION(ctx->pp_comp);
- if (sctx) {
+ if (sctx)
atomic_inc(&sctx->koff_cnt);
- INIT_COMPLETION(sctx->pp_comp);
- }
-#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
- xlog(__func__, ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
-#endif
- spin_unlock_irqrestore(&ctx->koff_lock, flags);
+
+ trace_mdp_cmd_kickoff(ctl->num, atomic_read(&ctx->koff_cnt));
mdss_mdp_cmd_clk_on(ctx);
@@ -915,7 +931,7 @@ int mdss_mdp_cmd_restore(struct mdss_mdp_ctl *ctl)
{
pr_debug("%s: called for ctl%d\n", __func__, ctl->num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- if (mdss_mdp_cmd_tearcheck_setup(ctl))
+ if (mdss_mdp_cmd_tearcheck_setup(ctl, true))
pr_warn("%s: tearcheck setup failed\n", __func__);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
@@ -947,6 +963,12 @@ static void mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl,
if (ctx->panel_power_state == panel_power_state)
return;
+ /*
+ * If the panel will be left on, then we do not need to turn off
+ * interface clocks since we may continue to get display updates.
+ */
+ if (mdss_panel_is_power_on(panel_power_state))
+ return;
list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
mdss_mdp_cmd_remove_vsync_handler(ctl, handle);
@@ -1003,6 +1025,8 @@ static void mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl,
mdss_mdp_cmd_clk_off(ctx);
flush_work(&ctx->pp_done_work);
+ mdss_mdp_cmd_tearcheck_setup(ctl, false);
+
ctx->panel_power_state = panel_power_state;
}
@@ -1037,7 +1061,7 @@ int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
WARN(ret, "intf %d unblank error (%d)\n", ctl->intf_num, ret);
}
- if (panel_power_state != MDSS_PANEL_POWER_OFF) {
+ if (mdss_panel_is_power_on(panel_power_state)) {
pr_debug("%s: cmd_off with panel always on\n", __func__);
goto end;
}
@@ -1070,8 +1094,9 @@ end:
int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_cmd_ctx *ctx;
+ struct mdss_mdp_ctl *sctl = NULL;
struct mdss_mdp_mixer *mixer;
- int i, ret;
+ int i, ret = 0;
pr_debug("%s:+\n", __func__);
@@ -1079,7 +1104,9 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
return -EINVAL;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
- if (ctx && (ctx->panel_power_state != MDSS_PANEL_POWER_OFF)) {
+
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (ctx && mdss_panel_is_power_on(ctx->panel_power_state)) {
pr_debug("%s: cmd_start with panel always on\n",
__func__);
/*
@@ -1090,7 +1117,10 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
* enable tearcheck logic.
*/
mdss_mdp_cmd_restore(ctl);
- return 0;
+
+ /* Turn on the panel so that it can exit low power mode */
+ ret = mdss_mdp_cmd_panel_on(ctl, sctl);
+ goto end;
}
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
@@ -1122,7 +1152,8 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctx->panel_ndx = ctl->panel_ndx;
#endif
ctx->pp_num = mixer->num;
- init_completion(&ctx->pp_comp);
+ ctx->pp_timeout_report_cnt = 0;
+ init_waitqueue_head(&ctx->pp_waitq);
init_completion(&ctx->stop_comp);
spin_lock_init(&ctx->clk_lock);
spin_lock_init(&ctx->koff_lock);
@@ -1153,8 +1184,7 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
mdss_mdp_cmd_pingpong_done, ctl);
- ret = mdss_mdp_cmd_tearcheck_setup(ctl);
-
+ ret = mdss_mdp_cmd_tearcheck_setup(ctl, true);
if (ret) {
pr_err("tearcheck setup failed\n");
return ret;
@@ -1167,8 +1197,10 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctl->remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
ctl->read_line_cnt_fnc = mdss_mdp_cmd_line_count;
ctl->restore_fnc = mdss_mdp_cmd_restore;
+
+end:
pr_debug("%s:-\n", __func__);
- return 0;
+ return ret;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 88d8fb83184..555cf66ac34 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -382,6 +382,82 @@ static int __mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
return 0;
}
+static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
+{
+ int plane;
+
+ for (plane = 0; plane < MAX_PLANES; plane++) {
+ u32 hor_req_pixels, hor_fetch_pixels;
+ u32 hor_ov_fetch, vert_ov_fetch;
+ u32 vert_req_pixels, vert_fetch_pixels;
+ u32 src_w = pipe->src.w >> pipe->horz_deci;
+ u32 src_h = pipe->src.h >> pipe->vert_deci;
+
+ /*
+ * plane 1 and 2 are for chroma and are same. While configuring
+		 * Planes 1 and 2 are both chroma and are identical, so when
+		 * configuring the HW, programming only one of the chroma
+		 * components is sufficient.
+ if (plane == 2)
+ continue;
+
+ /*
+		 * For the chroma plane, width is halved for the following
+		 * sub-sampled formats, except when decimation is enabled,
+		 * where the hardware avoids one line of decimation instead
+		 * of downsampling.
+ */
+ if (plane == 1 && !pipe->horz_deci &&
+ ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))) {
+ src_w >>= 1;
+ }
+
+ if (plane == 1 && !pipe->vert_deci &&
+ ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+ (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)))
+ src_h >>= 1;
+
+ hor_req_pixels = pipe->scale.roi_w[plane] +
+ pipe->scale.num_ext_pxls_left[plane] +
+ pipe->scale.num_ext_pxls_right[plane];
+
+ hor_fetch_pixels = src_w +
+ pipe->scale.left_ftch[plane] +
+ pipe->scale.left_rpt[plane] +
+ pipe->scale.right_ftch[plane] +
+ pipe->scale.right_rpt[plane];
+
+ hor_ov_fetch = src_w + pipe->scale.left_ftch[plane] +
+ pipe->scale.right_ftch[plane];
+
+ vert_req_pixels = pipe->scale.num_ext_pxls_top[plane] +
+ pipe->scale.num_ext_pxls_btm[plane];
+
+ vert_fetch_pixels = pipe->scale.top_ftch[plane] +
+ pipe->scale.top_rpt[plane] +
+ pipe->scale.btm_ftch[plane] +
+ pipe->scale.btm_rpt[plane];
+
+ vert_ov_fetch = src_h + pipe->scale.top_ftch[plane] +
+ pipe->scale.btm_ftch[plane];
+
+ if ((hor_req_pixels != hor_fetch_pixels) ||
+ (hor_ov_fetch > pipe->img_width) ||
+ (vert_req_pixels != vert_fetch_pixels) ||
+ (vert_ov_fetch > pipe->img_height)) {
+ pr_err("err: plane=%d h_req:%d h_fetch:%d v_req:%d v_fetch:%d src_img:[%d,%d]\n",
+ plane,
+ hor_req_pixels, hor_fetch_pixels,
+ vert_req_pixels, vert_fetch_pixels,
+ pipe->img_width, pipe->img_height);
+ pipe->scale.enable_pxl_ext = 0;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
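Note: a worked example of the horizontal check, with hypothetical numbers for a 64x64 MDSS_MDP_CHROMA_420 source and no decimation:

	/*
	 * Plane 1: src_w = 64 >> 0 = 64, then halved to 32 for 4:2:0.
	 * With roi_w[1] = 32, num_ext_pxls_left[1] = 1 and no right
	 * extension: hor_req_pixels = 32 + 1 + 0 = 33.
	 * With left_ftch[1] = 1 and no repeats or right fetch:
	 * hor_fetch_pixels = 32 + 1 + 0 + 0 + 0 = 33, and
	 * hor_ov_fetch = 32 + 1 + 0 = 33 <= img_width (64),
	 * so the extension config is accepted.
	 */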
static int __mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
{
u32 src;
@@ -389,8 +465,11 @@ static int __mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
src = pipe->src.w >> pipe->horz_deci;
- if (pipe->scale.enable_pxl_ext)
- return 0;
+ if (pipe->scale.enable_pxl_ext) {
+ rc = __mdss_mdp_validate_pxl_extn(pipe);
+ return rc;
+ }
+
memset(&pipe->scale, 0, sizeof(struct mdp_scale_data));
rc = mdss_mdp_calc_phase_step(src, pipe->dst.w,
&pipe->scale.phase_step_x[0]);
@@ -410,10 +489,12 @@ static int __mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
if ((rc == -EOVERFLOW) && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)) {
/* overflow on Qseed2 scaler is acceptable */
rc = 0;
+ } else if (rc == -EOVERFLOW) {
+ /* overflow expected and should fallback to GPU */
+ rc = -ECANCELED;
} else if (rc) {
pr_err("Vertical scaling calculation failed=%d! %d->%d\n",
rc, src, pipe->dst.h);
- return rc;
}
return rc;
}
@@ -833,7 +914,6 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
}
pipe->params_changed++;
- pipe->has_buf = 0;
req->vert_deci = pipe->vert_deci;
@@ -1082,7 +1162,8 @@ int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
* If idle pc feature is not enabled, then get a reference to the
* runtime device which will be released when overlay is turned off
*/
- if (!mdp5_data->mdata->idle_pc_enabled) {
+ if (!mdp5_data->mdata->idle_pc_enabled ||
+ (mfd->panel_info->type != MIPI_CMD_PANEL)) {
rc = pm_runtime_get_sync(&mfd->pdev->dev);
if (IS_ERR_VALUE(rc)) {
pr_err("unable to resume with pm_runtime_get_sync rc=%d\n",
@@ -1216,6 +1297,11 @@ static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
} else {
pr_debug("no buf detected pnum=%d use solid fill\n",
pipe->num);
+ if ((pipe->flags & MDP_SOLID_FILL) == 0) {
+ pr_warn("commit without buffer on pipe %d\n",
+ pipe->num);
+ ret = -EINVAL;
+ }
buf = NULL;
}
@@ -1689,7 +1775,6 @@ static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
ret = mdss_mdp_data_get(src_data, &req->data, 1, flags);
if (IS_ERR_VALUE(ret))
pr_err("src_data pmem error\n");
- pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
@@ -1890,7 +1975,6 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
buf->p[0].addr += offset;
buf->p[0].len = fbi->fix.smem_len - offset;
buf->num_planes = 1;
- pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
if (fbi->var.xres > MAX_MIXER_WIDTH || mfd->split_display) {
@@ -1906,7 +1990,6 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
}
pipe->back_buf = *buf;
- pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
}
mutex_unlock(&mdp5_data->ov_lock);
@@ -2170,27 +2253,135 @@ static struct attribute_group mdp_overlay_sysfs_group = {
.attrs = mdp_overlay_sysfs_attrs,
};
+static void mdss_mdp_hw_cursor_setpos(struct mdss_mdp_mixer *mixer,
+ struct mdss_rect *roi, u32 start_x, u32 start_y)
+{
+ int roi_xy = (roi->y << 16) | roi->x;
+ int start_xy = (start_y << 16) | start_x;
+ int roi_size = (roi->h << 16) | roi->w;
+
+ if (!mixer) {
+ pr_err("mixer not available\n");
+ return;
+ }
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY, roi_xy);
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY, start_xy);
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
+}
+
+static void mdss_mdp_hw_cursor_setimage(struct mdss_mdp_mixer *mixer,
+ struct fb_cursor *cursor, u32 cursor_addr, struct mdss_rect *roi)
+{
+ int calpha_en, transp_en, alpha, size;
+ struct fb_image *img = &cursor->image;
+ u32 blendcfg;
+ int roi_size = 0;
+
+ if (!mixer) {
+ pr_err("mixer not available\n");
+ return;
+ }
+
+ if (img->bg_color == 0xffffffff)
+ transp_en = 0;
+ else
+ transp_en = 1;
+
+ alpha = (img->fg_color & 0xff000000) >> 24;
+
+ if (alpha)
+ calpha_en = 0x0; /* xrgb */
+ else
+ calpha_en = 0x2; /* argb */
+
+ roi_size = (roi->h << 16) | roi->w;
+ size = (img->height << 16) | img->width;
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
+ img->width * 4);
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
+ cursor_addr);
+ blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+ blendcfg &= ~0x1;
+ blendcfg |= (transp_en << 3) | (calpha_en << 1);
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
+ blendcfg);
+ if (calpha_en)
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
+ alpha);
+
+ if (transp_en) {
+ mdp_mixer_write(mixer,
+ MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
+ ((img->bg_color & 0xff00) << 8) |
+ (img->bg_color & 0xff));
+ mdp_mixer_write(mixer,
+ MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
+ ((img->bg_color & 0xff0000) >> 16));
+ mdp_mixer_write(mixer,
+ MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
+ ((img->bg_color & 0xff00) << 8) |
+ (img->bg_color & 0xff));
+ mdp_mixer_write(mixer,
+ MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
+ ((img->bg_color & 0xff0000) >> 16));
+ }
+}
+
+static void mdss_mdp_hw_cursor_blend_config(struct mdss_mdp_mixer *mixer,
+ struct fb_cursor *cursor)
+{
+ u32 blendcfg;
+ if (!mixer) {
+		pr_err("mixer not available\n");
+ return;
+ }
+
+ blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+ if (!cursor->enable != !(blendcfg & 0x1)) {
+ if (cursor->enable) {
+ pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
+ blendcfg |= 0x1;
+ } else {
+ pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
+ blendcfg &= ~0x1;
+ }
+
+ mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
+ blendcfg);
+ mixer->cursor_enabled = cursor->enable;
+ mixer->params_changed++;
+ }
+}
+
static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
struct fb_cursor *cursor)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
- struct mdss_mdp_mixer *mixer;
+ struct mdss_mdp_mixer *mixer_left = NULL;
+ struct mdss_mdp_mixer *mixer_right = NULL;
struct fb_image *img = &cursor->image;
- u32 blendcfg;
+ struct fbcurpos cursor_hot;
+ struct mdss_rect roi;
int ret = 0;
u32 xres = mfd->fbi->var.xres;
u32 yres = mfd->fbi->var.yres;
u32 start_x = img->dx;
u32 start_y = img->dy;
- u32 roi_x = 0;
- u32 roi_y = 0;
- int roi_w = 0;
- int roi_h = 0;
- int roi_size = 0;
+ u32 left_lm_w = left_lm_w_from_mfd(mfd);
- mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
- if (!mixer)
+ mixer_left = mdss_mdp_mixer_get(mdp5_data->ctl,
+ MDSS_MDP_MIXER_MUX_DEFAULT);
+ if (!mixer_left)
return -ENODEV;
+ if (mfd->split_display) {
+ mixer_right = mdss_mdp_mixer_get(mdp5_data->ctl,
+ MDSS_MDP_MIXER_MUX_RIGHT);
+ if (!mixer_right)
+ return -ENODEV;
+ }
if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
mfd->cursor_buf = dma_alloc_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
@@ -2213,9 +2404,6 @@ static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
ret);
return ret;
}
-
- mixer->cursor_hotx = 0;
- mixer->cursor_hoty = 0;
}
if ((img->width > MDSS_MDP_CURSOR_WIDTH) ||
@@ -2223,17 +2411,15 @@ static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
(img->depth != 32) || (start_x >= xres) || (start_y >= yres))
return -EINVAL;
- pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
- cursor->set);
-
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+ pr_debug("enable=%x set=%x\n", cursor->enable, cursor->set);
+ memset(&cursor_hot, 0, sizeof(struct fbcurpos));
+ memset(&roi, 0, sizeof(struct mdss_rect));
if (cursor->set & FB_CUR_SETHOT) {
if ((cursor->hot.x < img->width) &&
(cursor->hot.y < img->height)) {
- mixer->cursor_hotx = cursor->hot.x;
- mixer->cursor_hoty = cursor->hot.y;
+ cursor_hot.x = cursor->hot.x;
+ cursor_hot.y = cursor->hot.y;
/* Update cursor position */
cursor->set |= FB_CUR_SETPOS;
} else {
@@ -2242,33 +2428,25 @@ static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
}
}
- if (start_x > mixer->cursor_hotx) {
- start_x -= mixer->cursor_hotx;
+ if (start_x > cursor_hot.x) {
+ start_x -= cursor_hot.x;
} else {
- roi_x = mixer->cursor_hotx - start_x;
+ roi.x = cursor_hot.x - start_x;
start_x = 0;
}
- if (start_y > mixer->cursor_hoty) {
- start_y -= mixer->cursor_hoty;
+ if (start_y > cursor_hot.y) {
+ start_y -= cursor_hot.y;
} else {
- roi_y = mixer->cursor_hoty - start_y;
+ roi.y = cursor_hot.y - start_y;
start_y = 0;
}
- roi_w = min(xres - start_x, img->width - roi_x);
- roi_h = min(yres - start_y, img->height - roi_y);
- roi_size = (roi_h << 16) | roi_w;
+ roi.w = min(xres - start_x, img->width - roi.x);
+ roi.h = min(yres - start_y, img->height - roi.y);
- if (cursor->set & FB_CUR_SETPOS) {
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY,
- (roi_y << 16) | roi_x);
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY,
- (start_y << 16) | start_x);
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
- }
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
if (cursor->set & FB_CUR_SETIMAGE) {
- int calpha_en, transp_en, alpha, size;
u32 cursor_addr;
ret = copy_from_user(mfd->cursor_buf, img->data,
img->width * img->height * 4);
@@ -2288,78 +2466,49 @@ static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
}
cursor_addr = mfd->cursor_buf_phys;
}
-
- if (img->bg_color == 0xffffffff)
- transp_en = 0;
- else
- transp_en = 1;
-
- alpha = (img->fg_color & 0xff000000) >> 24;
-
- if (alpha)
- calpha_en = 0x0; /* xrgb */
- else
- calpha_en = 0x2; /* argb */
-
- size = (img->height << 16) | img->width;
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
- img->width * 4);
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
- cursor_addr);
-
- wmb();
-
- blendcfg &= ~0x1;
- blendcfg |= (transp_en << 3) | (calpha_en << 1);
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
- blendcfg);
- if (calpha_en)
- mdp_mixer_write(mixer,
- MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
- alpha);
-
- if (transp_en) {
- mdp_mixer_write(mixer,
- MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
- ((img->bg_color & 0xff00) << 8) |
- (img->bg_color & 0xff));
- mdp_mixer_write(mixer,
- MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
- ((img->bg_color & 0xff0000) >> 16));
- mdp_mixer_write(mixer,
- MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
- ((img->bg_color & 0xff00) << 8) |
- (img->bg_color & 0xff));
- mdp_mixer_write(mixer,
- MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
- ((img->bg_color & 0xff0000) >> 16));
- }
-
- mixer->cursor_hotx = 0;
- mixer->cursor_hoty = 0;
- }
-
- if (!cursor->enable != !(blendcfg & 0x1)) {
- if (cursor->enable) {
- pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
- blendcfg |= 0x1;
- } else {
- pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
- blendcfg &= ~0x1;
- }
-
- mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
- blendcfg);
-
- mixer->cursor_enabled = cursor->enable;
- mixer->params_changed++;
+ mdss_mdp_hw_cursor_setimage(mixer_left, cursor, cursor_addr,
+ &roi);
+ if (mfd->split_display)
+ mdss_mdp_hw_cursor_setimage(mixer_right, cursor,
+ cursor_addr, &roi);
+ }
+
+ if ((start_x + roi.w) <= left_lm_w) {
+ if (cursor->set & FB_CUR_SETPOS)
+ mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
+ start_y);
+ mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+ cursor->enable = false;
+ mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+ } else if (start_x >= left_lm_w) {
+ start_x -= left_lm_w;
+ if (cursor->set & FB_CUR_SETPOS)
+ mdss_mdp_hw_cursor_setpos(mixer_right, &roi, start_x,
+ start_y);
+ mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+ cursor->enable = false;
+ mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+ } else {
+ struct mdss_rect roi_right = roi;
+ roi.w = left_lm_w - start_x;
+ if (cursor->set & FB_CUR_SETPOS)
+ mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
+ start_y);
+ mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+
+ roi_right.x = 0;
+ roi_right.w = (start_x + roi_right.w) - left_lm_w;
+ start_x = 0;
+ if (cursor->set & FB_CUR_SETPOS)
+ mdss_mdp_hw_cursor_setpos(mixer_right, &roi_right,
+ start_x, start_y);
+ mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
}
- mixer->ctl->flush_bits |= BIT(6) << mixer->num;
+ mixer_left->ctl->flush_bits |= BIT(6) << mixer_left->num;
+ if (mfd->split_display)
+ mixer_right->ctl->flush_bits |= BIT(6) << mixer_right->num;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-
return 0;
}
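Note: a worked example of the straddling branch, with hypothetical numbers (left_lm_w = 1080, start_x = 1060, roi.w = 64):

	/*
	 * Left LM:  roi.w       = 1080 - 1060        = 20 at start_x = 1060
	 * Right LM: roi_right.x = 0,
	 *           roi_right.w = (1060 + 64) - 1080 = 44 at start_x = 0
	 * 20 + 44 = 64, the full cursor width.
	 */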
@@ -3182,8 +3331,10 @@ static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
rc = mdss_mdp_overlay_start(mfd);
if (rc)
goto end;
- if (mfd->panel_info->type != WRITEBACK_PANEL)
+ if (mfd->panel_info->type != WRITEBACK_PANEL) {
+ atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
rc = mdss_mdp_overlay_kickoff(mfd, NULL);
+ }
} else {
rc = mdss_mdp_ctl_setup(mdp5_data->ctl);
if (rc)
@@ -3323,11 +3474,13 @@ ctl_stop:
&mdp5_data->mdata->active_intf_cnt) == 0)
mdss_mdp_rotator_release_all();
}
- if (!mdp5_data->mdata->idle_pc_enabled) {
+ if (!mdp5_data->mdata->idle_pc_enabled ||
+ (mfd->panel_info->type != MIPI_CMD_PANEL)) {
rc = pm_runtime_put(&mfd->pdev->dev);
if (rc)
pr_err("unable to suspend w/pm_runtime_put (%d)\n",
rc);
}
}
mutex_unlock(&mdp5_data->ov_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 05b72fdc837..1b4bb0d0dd2 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -53,6 +53,18 @@ static inline u32 mdss_mdp_pipe_read(struct mdss_mdp_pipe *pipe, u32 reg)
return readl_relaxed(pipe->base + reg);
}
+static inline bool is_unused_smp_allowed(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ switch (MDSS_GET_MAJOR_MINOR(mdata->mdp_rev)) {
+ case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_103):
+ return true;
+ default:
+ return false;
+ }
+}
+
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
size_t n)
{
@@ -72,7 +84,7 @@ static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
* that calls for change in smp configuration (addition/removal
* of smp blocks), so that fallback solution happens.
*/
- if (i != 0 && n != i) {
+ if (i != 0 && (((n < i) && !is_unused_smp_allowed()) || (n > i))) {
pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
n, i);
return 0;
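Note: concrete effect of the relaxed condition, with hypothetical numbers:

	/*
	 * Example: i = 3 mmbs currently allocated.
	 *   n = 2 -> allowed only where is_unused_smp_allowed() (MDP rev
	 *            1.3), keeping one mmb reserved but unused;
	 *   n = 4 -> always rejected here, forcing the fallback path.
	 */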
@@ -892,6 +904,7 @@ int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe)
mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL0);
if (sw_reset_avail) {
+ reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off);
writel_relaxed(reg_val & ~BIT(pipe->sw_reset.bit_off),
mdata->mdp_base + sw_reset_off);
wmb();
@@ -1324,7 +1337,7 @@ int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
(pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
(ctl->mdata->mixer_switched)) || ctl->roi_changed;
- if ((!(pipe->flags & MDP_VPU_PIPE) && src_data == NULL) ||
+ if ((!(pipe->flags & MDP_VPU_PIPE) && (src_data == NULL)) ||
(pipe->flags & MDP_SOLID_FILL)) {
pipe->params_changed = 0;
mdss_mdp_pipe_solidfill_setup(pipe);
@@ -1358,10 +1371,9 @@ int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
opmode);
}
- if ((pipe->flags & MDP_VPU_PIPE) && (src_data == NULL ||
- !pipe->has_buf)) {
- pr_debug("%s src_data=%p has_buf=%d pipe num=%dx",
- __func__, src_data, pipe->has_buf, pipe->num);
+ if ((pipe->flags & MDP_VPU_PIPE) && (src_data == NULL)) {
+ pr_debug("%s src_data=%p pipe num=%dx",
+ __func__, src_data, pipe->num);
goto update_nobuf;
}
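
This hunk, together with the matching one-line removal in mdss_mdp_splash_logo.c further down, retires the pipe->has_buf flag: whether a buffer is attached is now inferred from src_data alone. The resulting decision logic, modelled as two predicates (illustrative; MDP_VPU_PIPE and MDP_SOLID_FILL are flag bits from msm_mdp.h):

#include <linux/types.h>

static bool wants_solid_fill(u32 flags, const void *src_data)
{
	/* non-VPU pipe with no buffer, or an explicit solid-fill request */
	return (!(flags & MDP_VPU_PIPE) && src_data == NULL) ||
	       (flags & MDP_SOLID_FILL);
}

static bool vpu_pipe_without_buf(u32 flags, const void *src_data)
{
	/* VPU pipes skip the buffer update instead of solid-filling */
	return (flags & MDP_VPU_PIPE) && src_data == NULL;
}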
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 9abe12a3014..a3163ccf8d1 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -473,19 +473,21 @@ int mdss_mdp_rotator_setup(struct msm_fb_data_type *mfd,
list_add(&rot->list, &mdp5_data->rot_proc_list);
} else if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
rot = mdss_mdp_rotator_session_get(req->id);
-
if (!rot) {
pr_err("rotator session=%x not found\n", req->id);
ret = -ENODEV;
goto rot_err;
}
- if (rot->format != fmt->format)
- format_changed = true;
+
if (work_pending(&rot->commit_work)) {
mutex_unlock(&rotator_lock);
flush_work(&rot->commit_work);
mutex_lock(&rotator_lock);
}
+
+ if (rot->format != fmt->format)
+ format_changed = true;
+
} else {
pr_err("invalid rotator session id=%x\n", req->id);
ret = -EINVAL;
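
The rotator_setup hunk above moves the format comparison to after the flush of pending commit work. Two things are at play: rotator_lock must be dropped around flush_work() because the commit work acquires the same lock (flushing while holding it would deadlock), and reading rot->format only after the flush ensures the comparison sees the state the completed commit left behind. The pattern in isolation (helper name assumed):

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Caller holds @lock on entry and on return, but the lock is dropped
 * in between, so any state read before the call must be re-read. */
static void flush_session_work(struct mutex *lock, struct work_struct *work)
{
	if (work_pending(work)) {
		mutex_unlock(lock);	/* the work item takes @lock itself */
		flush_work(work);	/* wait for the pending commit */
		mutex_lock(lock);	/* re-acquire before touching state */
	}
}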
@@ -588,6 +590,7 @@ static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
cancel_work_sync(&rot->commit_work);
mutex_lock(&rotator_lock);
}
+
mdss_mdp_rotator_busy_wait(rot);
list_del(&rot->head);
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
index a2c8b612993..ce2cb977a52 100644
--- a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
+++ b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
@@ -288,7 +288,6 @@ static struct mdss_mdp_pipe *mdss_mdp_splash_get_pipe(
buf->p[0].addr = mfd->splash_info.iova;
buf->p[0].len = image_size;
buf->num_planes = 1;
- pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
return pipe;
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index cea2fb5f3c1..d081c2e3681 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -416,11 +416,28 @@ static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
int ret;
if (!list_empty(&wb->register_queue)) {
+ struct ion_client *iclient = mdss_get_ionclient();
+ struct ion_handle *ihdl;
+
+ if (!iclient) {
+ pr_err("iclient is NULL\n");
+ return NULL;
+ }
+
+ ihdl = ion_import_dma_buf(iclient, data->memory_id);
+ if (IS_ERR_OR_NULL(ihdl)) {
+ pr_err("unable to import fd %d\n", data->memory_id);
+ return NULL;
+ }
+ /* only the pointer value is compared later, so the handle can be freed */
+ ion_free(iclient, ihdl);
+
list_for_each_entry(node, &wb->register_queue, registered_entry)
- if ((node->buf_info.memory_id == data->memory_id) &&
+ if ((node->buf_data.p[0].srcp_ihdl == ihdl) &&
(node->buf_info.offset == data->offset)) {
- pr_debug("found node fd=%x off=%x addr=%pa\n",
- data->memory_id, data->offset,
+ pr_debug("found fd=%d hdl=%p off=%x addr=%pa\n",
+ data->memory_id, ihdl,
+ data->offset,
&node->buf_data.p[0].addr);
return node;
}
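
The get_user_node hunk above changes the registered-buffer lookup key from the raw fd to the ion handle behind it. An fd is not a stable identity (userspace can dup it or re-export the same buffer under a new number), whereas importing the same dma-buf into one ion client yields the same handle, so import-then-free gives a pointer that identifies the buffer no matter which fd carried it. A sketch of that idea (the function name is assumed; ion_import_dma_buf/ion_free are the calls used in the hunk, and the matched node is assumed to hold its own reference, which is why comparing the freed handle's address is safe):

static struct ion_handle *buffer_identity(struct ion_client *client, int fd)
{
	struct ion_handle *ihdl = ion_import_dma_buf(client, fd);

	if (IS_ERR_OR_NULL(ihdl))
		return NULL;
	/* only the pointer value is compared later, so drop our ref now */
	ion_free(client, ihdl);
	return ihdl;
}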
@@ -481,8 +498,10 @@ static void mdss_mdp_wb_free_node(struct mdss_mdp_wb_data *node)
if (node->user_alloc) {
buf = &node->buf_data.p[0];
- pr_debug("free user node mem_id=%d offset=%u addr=0x%pa\n",
+
+ pr_debug("free user mem_id=%d ihdl=%p, offset=%u addr=0x%pa\n",
node->buf_info.memory_id,
+ buf->srcp_ihdl,
node->buf_info.offset,
&buf->addr);
@@ -563,6 +582,7 @@ static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
{
struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
struct mdss_mdp_wb_data *node = NULL;
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret;
if (!wb) {
@@ -570,6 +590,11 @@ static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
return -ENODEV;
}
+ if (!ctl) {
+ pr_err("unable to dequeue, ctl is not initialized\n");
+ return -ENODEV;
+ }
+
ret = wait_event_interruptible(wb->wait_q, is_buffer_ready(wb));
if (ret) {
pr_err("failed to get dequeued buffer\n");
@@ -579,6 +604,7 @@ static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
mutex_lock(&wb->lock);
if (wb->state == WB_STOPING) {
pr_debug("wfd stopped\n");
+ mdss_mdp_display_wait4comp(ctl);
wb->state = WB_STOP;
ret = -ENOBUFS;
} else if (!list_empty(&wb->busy_queue)) {
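
The wb_dequeue hunks add two teardown safeguards: a NULL check on the ctl (a dequeue can race with shutdown before the control path exists), and a mdss_mdp_display_wait4comp() on the WB_STOPING path so the last queued frame finishes in hardware before the state flips to WB_STOP. The ordering matters because userspace typically frees its buffers as soon as the final dequeue returns -ENOBUFS; returning early would let the MDP keep writing into freed memory. The stop path, sketched with the names from the hunk:

mutex_lock(&wb->lock);
if (wb->state == WB_STOPING) {
	mdss_mdp_display_wait4comp(ctl);	/* drain the in-flight frame */
	wb->state = WB_STOP;			/* now safe to declare stopped */
	ret = -ENOBUFS;				/* no more buffers for userspace */
}
mutex_unlock(&wb->lock);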
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index b3e09042959..437bf5c2250 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -83,7 +83,8 @@ struct kgsl_pwrlevel {
* @csdev: Pointer to the coresight device for this device
* @coresight_pdata: Coresight configuration for specific device
* @chipid: Chip ID for the device's GPU
- * @pm_qos_latency: latency value for cpu
+ * @pm_qos_active_latency: GPU PM QoS latency request for active state
+ * @pm_qos_wakeup_latency: GPU PM QoS latency request during wakeup
*/
struct kgsl_device_platform_data {
struct kgsl_pwrlevel pwrlevel[KGSL_MAX_PWRLEVELS];
@@ -100,7 +101,8 @@ struct kgsl_device_platform_data {
struct coresight_device *csdev;
struct coresight_platform_data *coresight_pdata;
unsigned int chipid;
- unsigned int pm_qos_latency;
+ unsigned int pm_qos_active_latency;
+ unsigned int pm_qos_wakeup_latency;
};
#ifdef CONFIG_MSM_KGSL_DRM
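
The msm_kgsl.h hunk splits the old pm_qos_latency platform-data field into separate active and wakeup votes, matching the two devicetree properties documented earlier in this merge. The hunk does not show the call sites, so the following is only a hedged sketch of how such values are typically fed to the 3.x-era pm_qos API:

#include <linux/pm_qos.h>

static struct pm_qos_request kgsl_qos_req;

static void gpu_wake_begin(unsigned int wakeup_latency_us)
{
	/* vote against deep idle states while the GPU is waking up */
	pm_qos_add_request(&kgsl_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   wakeup_latency_us);
}

static void gpu_active(unsigned int active_latency_us)
{
	/* adjust the vote once the GPU is active */
	pm_qos_update_request(&kgsl_qos_req, active_latency_us);
}

static void gpu_sleep(void)
{
	/* drop the vote entirely when the GPU goes back to sleep */
	pm_qos_remove_request(&kgsl_qos_req);
}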