diff options
| author | Tim Murray <timmurray@google.com> | 2016-07-25 19:21:17 -0700 |
|---|---|---|
| committer | Arvin Quilao <arquilao@gmail.com> | 2017-04-17 02:08:56 +0000 |
| commit | 9182f6aa815948d60ff6bbd905b09c9ccbed670b (patch) | |
| tree | 0ef4c78d433c63a13d7b91e19d4e99d73cc15288 | |
| parent | 2a70ff1c02c7830bd6f14ebbd8052b1f326fbaae (diff) | |
kgsl: convert some workqueues to use kthreads
adreno_dispatch_work and _kgsl_event_worker are both low-latency
low-runtime functions that are in the critical path of GPU
rendering. Moving them out of workqueues and into a dedicated FIFO
kthread avoids significant jitter.
Bug: 30342017
Change-Id: I5feb4e829064d422b4b9af2acb449afd1f981899
| -rw-r--r-- | drivers/gpu/msm/adreno_dispatch.c | 6 | ||||
| -rw-r--r-- | drivers/gpu/msm/adreno_dispatch.h | 2 | ||||
| -rw-r--r-- | drivers/gpu/msm/kgsl.c | 14 | ||||
| -rw-r--r-- | drivers/gpu/msm/kgsl.h | 5 | ||||
| -rw-r--r-- | drivers/gpu/msm/kgsl_events.c | 8 |
5 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 9e526fb329a..0c3134dcd86 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -1879,7 +1879,7 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev, * * Process expired commands and send new ones. */ -static void adreno_dispatcher_work(struct work_struct *work) +static void adreno_dispatcher_work(struct kthread_work *work) { struct adreno_dispatcher *dispatcher = container_of(work, struct adreno_dispatcher, work); @@ -1954,7 +1954,7 @@ void adreno_dispatcher_schedule(struct kgsl_device *device) struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - queue_work(device->work_queue, &dispatcher->work); + queue_kthread_work(&kgsl_driver.worker, &dispatcher->work); } /** @@ -2249,7 +2249,7 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev) if (adreno_is_a304(adreno_dev)) _fault_timer_interval = 400; - INIT_WORK(&dispatcher->work, adreno_dispatcher_work); + init_kthread_work(&dispatcher->work, adreno_dispatcher_work); init_completion(&dispatcher->idle_gate); complete_all(&dispatcher->idle_gate); diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h index c2721ad0c88..d7637146fe9 100644 --- a/drivers/gpu/msm/adreno_dispatch.h +++ b/drivers/gpu/msm/adreno_dispatch.h @@ -72,7 +72,7 @@ struct adreno_dispatcher { atomic_t fault; struct plist_head pending; spinlock_t plist_lock; - struct work_struct work; + struct kthread_work work; struct kobject kobj; struct completion idle_gate; }; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 2a92bf5a951..52b246687eb 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -4816,6 +4816,8 @@ static void kgsl_core_exit(void) static int __init kgsl_core_init(void) { int result = 0; + struct sched_param param = { .sched_priority = 2 }; + /* alloc major 
and minor device numbers */ result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX, "kgsl"); @@ -4877,6 +4879,18 @@ static int __init kgsl_core_init(void) kgsl_mmu_set_mmutype(ksgl_mmu_type); + init_kthread_worker(&kgsl_driver.worker); + + kgsl_driver.worker_thread = kthread_run(kthread_worker_fn, + &kgsl_driver.worker, "kgsl_worker_thread"); + + if (IS_ERR(kgsl_driver.worker_thread)) { + pr_err("unable to start kgsl thread\n"); + goto err; + } + + sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, ¶m); + kgsl_events_init(); /* create the memobjs kmem cache */ diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 661119b8c1c..f0b66b7f496 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -24,6 +24,7 @@ #include <linux/cdev.h> #include <linux/regulator/consumer.h> #include <linux/mm.h> +#include <linux/kthread.h> /* The number of memstore arrays limits the number of contexts allowed. * If more contexts are needed, update multiple for MEMSTORE_SIZE @@ -94,6 +95,8 @@ struct kgsl_driver { unsigned int mapped_max; } stats; unsigned int full_cache_threshold; + struct kthread_worker worker; + struct task_struct *worker_thread; }; extern struct kgsl_driver kgsl_driver; @@ -220,7 +223,7 @@ struct kgsl_event { void *priv; struct list_head node; unsigned int created; - struct work_struct work; + struct kthread_work work; int result; struct kgsl_event_group *group; }; diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c index 7f2f21e0d62..90ff200184d 100644 --- a/drivers/gpu/msm/kgsl_events.c +++ b/drivers/gpu/msm/kgsl_events.c @@ -32,7 +32,7 @@ static inline void signal_event(struct kgsl_device *device, { list_del(&event->node); event->result = result; - queue_work(device->events_wq, &event->work); + queue_kthread_work(&kgsl_driver.worker, &event->work); } /** @@ -42,7 +42,7 @@ static inline void signal_event(struct kgsl_device *device, * Each event callback has its own work struct and is run on a event 
specific * workqeuue. This is the worker that queues up the event callback function. */ -static void _kgsl_event_worker(struct work_struct *work) +static void _kgsl_event_worker(struct kthread_work *work) { struct kgsl_event *event = container_of(work, struct kgsl_event, work); int id = KGSL_CONTEXT_ID(event->context); @@ -238,7 +238,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group, event->created = jiffies; event->group = group; - INIT_WORK(&event->work, _kgsl_event_worker); + init_kthread_work(&event->work, _kgsl_event_worker); trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func); @@ -253,7 +253,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group, if (timestamp_cmp(retired, timestamp) >= 0) { event->result = KGSL_EVENT_RETIRED; - queue_work(device->events_wq, &event->work); + queue_kthread_work(&kgsl_driver.worker, &event->work); spin_unlock(&group->lock); return 0; } |
