Diffstat (limited to 'drivers/gpu/msm/kgsl_iommu.c')
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c  133
1 file changed, 66 insertions, 67 deletions
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index a791d67c8e0..5454498f6db 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -457,7 +457,7 @@ done:
* Disables iommu clocks
* Return - void
*/
-static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
{
struct kgsl_iommu *iommu = mmu->priv;
struct msm_iommu_drvdata *iommu_drvdata;
@@ -466,8 +466,15 @@ static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
- if (!iommu_unit->dev[j].clk_enabled)
+ if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
+ atomic_dec(&iommu_unit->dev[j].clk_enable_count);
+ BUG_ON(
+ atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0);
+ /*
+ * the clock calls have a refcount so call them on every
+ * enable/disable call
+ */
iommu_drvdata = dev_get_drvdata(
iommu_unit->dev[j].dev->parent);
if (iommu_drvdata->aclk)
@@ -475,7 +482,6 @@ static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
if (iommu_drvdata->clk)
clk_disable_unprepare(iommu_drvdata->clk);
clk_disable_unprepare(iommu_drvdata->pclk);
- iommu_unit->dev[j].clk_enabled = false;
}
}
}
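
Note: the disable path above relies on a per-device atomic reference count. The real structure is declared in kgsl_iommu.h and is not part of this diff; the following is a simplified sketch, with every name other than ctx_id and clk_enable_count being an assumption made for illustration only.

	/* Sketch only, not patch code: the per-device state the hunk assumes. */
	#include <linux/atomic.h>
	#include <linux/bug.h>

	struct iommu_dev_sketch {
		int ctx_id;                   /* IOMMU context this device backs */
		atomic_t clk_enable_count;    /* one count per outstanding enable */
	};

	/* Release one reference; a negative count means an unbalanced disable. */
	static void sketch_clk_ref_put(struct iommu_dev_sketch *dev)
	{
		atomic_dec(&dev->clk_enable_count);
		BUG_ON(atomic_read(&dev->clk_enable_count) < 0);
	}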
@@ -496,32 +502,14 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
unsigned int id, unsigned int ts,
u32 type)
{
- struct kgsl_mmu *mmu = data;
- struct kgsl_iommu *iommu = mmu->priv;
-
- if (!iommu->clk_event_queued) {
- if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
- KGSL_DRV_ERR(device,
- "IOMMU disable clock event being cancelled, "
- "iommu_last_cmd_ts: %x, retired ts: %x\n",
- iommu->iommu_last_cmd_ts, ts);
- return;
- }
+ struct kgsl_iommu_disable_clk_param *param = data;
- if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
- kgsl_iommu_disable_clk(mmu);
- iommu->clk_event_queued = false;
- } else {
- /* add new event to fire when ts is reached, this can happen
- * if we queued an event and someone requested the clocks to
- * be disbaled on a later timestamp */
- if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
- kgsl_iommu_clk_disable_event, mmu, mmu)) {
- KGSL_DRV_ERR(device,
- "Failed to add IOMMU disable clk event\n");
- iommu->clk_event_queued = false;
- }
- }
+ if ((0 <= timestamp_cmp(ts, param->ts)) ||
+ (KGSL_EVENT_CANCELLED == type))
+ kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
+ else
+ /* something went wrong with the event handling mechanism */
+ BUG_ON(1);
}
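
The callback above dereferences an event payload that is defined in kgsl_iommu.h rather than in this file. A minimal sketch of what the code assumes, with the field list inferred from its usage (param->mmu, param->ctx_id, param->ts); the authoritative definition is outside this diff:

	/* Inferred sketch of the event payload used by the callback above. */
	struct kgsl_iommu_disable_clk_param {
		struct kgsl_mmu *mmu;  /* MMU whose context clock reference is dropped */
		int ctx_id;            /* context id passed to kgsl_iommu_disable_clk() */
		unsigned int ts;       /* timestamp the disable was queued against */
	};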
/*
@@ -531,6 +519,8 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
* @ts_valid - Indicates whether ts parameter is valid, if this parameter
* is false then it means that the calling function wants to disable the
* IOMMU clocks immediately without waiting for any timestamp
+ * @ctx_id: Context id of the IOMMU context for which clocks are to be
+ * turned off
*
* Creates an event to disable the IOMMU clocks on timestamp and if event
* already exists then updates the timestamp of disabling the IOMMU clocks
@@ -539,28 +529,25 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
* Return - void
*/
static void
-kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
- bool ts_valid)
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, int ctx_id)
{
- struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_disable_clk_param *param;
- if (iommu->clk_event_queued) {
- if (ts_valid && (0 <
- timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
- iommu->iommu_last_cmd_ts = ts;
- } else {
- if (ts_valid) {
- iommu->iommu_last_cmd_ts = ts;
- iommu->clk_event_queued = true;
- if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
- ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
- KGSL_DRV_ERR(mmu->device,
- "Failed to add IOMMU disable clk event\n");
- iommu->clk_event_queued = false;
- }
- } else {
- kgsl_iommu_disable_clk(mmu);
- }
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*param));
+ return;
+ }
+ param->mmu = mmu;
+ param->ctx_id = ctx_id;
+ param->ts = ts;
+
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, param, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ kfree(param);
}
}
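
A hypothetical caller (not part of this diff) illustrating the deferred-release pattern: the reference taken by kgsl_iommu_enable_clk() is dropped only once the given timestamp retires, via the event queued above.

	/* Hypothetical usage sketch; error handling trimmed for brevity. */
	static int sketch_use_iommu_until_ts(struct kgsl_mmu *mmu, unsigned int ts)
	{
		int ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);

		if (ret)
			return ret;

		/* ... submit GPU commands that still need the IOMMU ... */

		/* queue the matching disable to run once 'ts' is retired */
		kgsl_iommu_disable_clk_on_ts(mmu, ts, KGSL_IOMMU_CONTEXT_USER);
		return 0;
	}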
@@ -583,8 +570,7 @@ static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
- if (iommu_unit->dev[j].clk_enabled ||
- ctx_id != iommu_unit->dev[j].ctx_id)
+ if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
iommu_drvdata =
dev_get_drvdata(iommu_unit->dev[j].dev->parent);
@@ -610,12 +596,25 @@ static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
goto done;
}
}
- iommu_unit->dev[j].clk_enabled = true;
+ atomic_inc(&iommu_unit->dev[j].clk_enable_count);
}
}
done:
- if (ret)
- kgsl_iommu_disable_clk(mmu);
+ if (ret) {
+ struct kgsl_iommu_unit *iommu_unit;
+ if (iommu->unit_count == i)
+ i--;
+ iommu_unit = &iommu->iommu_units[i];
+ do {
+ for (j--; j >= 0; j--)
+ kgsl_iommu_disable_clk(mmu, ctx_id);
+ i--;
+ if (i >= 0) {
+ iommu_unit = &iommu->iommu_units[i];
+ j = iommu_unit->dev_count;
+ }
+ } while (i >= 0);
+ }
return ret;
}
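
With the old boolean clk_enabled replaced by a counter, overlapping enable calls now stack instead of being skipped, and each caller drops its own reference; the underlying clk_prepare_enable()/clk_disable_unprepare() calls are themselves refcounted, as the comment in the first hunk notes. A sketch of the nesting this allows (an assumption for illustration, not patch code):

	/* Sketch of nested use made safe by the refcount. */
	static int sketch_nested_clk_use(struct kgsl_mmu *mmu)
	{
		int ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);

		if (ret)
			return ret;

		/* an inner path may take and drop its own reference independently */
		if (!kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER))
			kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);

		/* clocks stay on until the outer reference is also dropped */
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
		return 0;
	}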
@@ -842,6 +841,9 @@ static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
ret = -EINVAL;
goto done;
}
+ atomic_set(
+ &(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
+ 0);
iommu_unit->dev[iommu_unit->dev_count].dev =
msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -1625,6 +1627,7 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
}
status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
if (status) {
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
KGSL_CORE_ERR("clk enable failed\n");
goto done;
}
@@ -1670,14 +1673,11 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
KGSL_IOMMU_SETSTATE_NOP_OFFSET,
cp_nop_packet(1), sizeof(unsigned int));
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
- if (status) {
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
- kgsl_detach_pagetable_iommu_domain(mmu);
- }
return status;
}
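
A condensed sketch (an assumption, not the literal start path) of the balanced references kgsl_iommu_start() now takes: one per context while the context banks are programmed, the user-context reference dropped if the priv-context enable fails, and both dropped explicitly once programming is done.

	/* Sketch of the start-path clock bracketing under the refcounted scheme. */
	static int sketch_start_clocks(struct kgsl_mmu *mmu)
	{
		int status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);

		if (status)
			return status;

		status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
		if (status) {
			/* drop the user-context reference taken above */
			kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
			return status;
		}

		/* ... program context bank registers while both clocks are held ... */

		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
		kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
		return 0;
	}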
@@ -1800,6 +1800,7 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
iommu_unit,
iommu_unit->dev[j].ctx_id,
FSR, 0);
+ kgsl_iommu_disable_clk(mmu, j);
_iommu_unlock(iommu);
iommu_unit->dev[j].fault = 0;
}
@@ -1812,7 +1813,6 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
- struct kgsl_iommu *iommu = mmu->priv;
/*
* stop device mmu
*
@@ -1828,9 +1828,7 @@ static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
kgsl_iommu_pagefault_resume(mmu);
}
/* switch off MMU clocks and cancel any events it has queued */
- iommu->clk_event_queued = false;
kgsl_cancel_events(mmu->device, mmu);
- kgsl_iommu_disable_clk(mmu);
}
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
@@ -1883,7 +1881,7 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
pt_base = KGSL_IOMMU_GET_CTX_REG_TTBR0(iommu,
(&iommu->iommu_units[0]),
KGSL_IOMMU_CONTEXT_USER);
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
@@ -1911,7 +1909,6 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
phys_addr_t pt_val;
ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
-
if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
return ret;
@@ -1992,7 +1989,7 @@ unlock:
_iommu_unlock(iommu);
/* Disable smmu clock */
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return ret;
}
@@ -2072,13 +2069,14 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
return ret;
}
/* Need to idle device before changing options */
ret = mmu->device->ftbl->idle(mmu->device);
if (ret) {
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
return ret;
}
@@ -2101,7 +2099,8 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
SCTLR, sctlr_val);
}
}
- kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
return ret;
}