/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cpu-boost: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/time.h>
#include <linux/smpboot.h>

/*
 * Per-CPU boost state. Each CPU tracks two independent frequency floors:
 * boost_min (migration sync boost) and input_boost_min (input event boost).
 */
struct cpu_sync {
	struct delayed_work boost_rem;
	struct delayed_work input_boost_rem;
	int cpu;
	spinlock_t lock;
	bool pending;
	int src_cpu;
	unsigned int boost_min;
	unsigned int input_boost_min;
};

static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static DEFINE_PER_CPU(struct task_struct *, thread);
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

static unsigned int boost_ms;
module_param(boost_ms, uint, 0644);

static unsigned int sync_threshold;
module_param(sync_threshold, uint, 0644);

static unsigned int input_boost_freq;
module_param(input_boost_freq, uint, 0644);

static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

static u64 last_input_time;
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)

/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	if (val != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	if (!b_min && !ib_min)
		return NOTIFY_OK;

	min = max(b_min, ib_min);

	pr_debug("CPU%u policy min before boost: %u kHz\n", cpu, policy->min);
	pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

	cpufreq_verify_within_limits(policy, min, UINT_MAX);

	pr_debug("CPU%u policy min after boost: %u kHz\n", cpu, policy->min);

	return NOTIFY_OK;
}

static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};

static void do_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  boost_rem.work);

	pr_debug("Removing boost for CPU%d\n", s->cpu);
	s->boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}

static void do_input_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  input_boost_rem.work);

	pr_debug("Removing input boost for CPU%d\n", s->cpu);
	s->input_boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}
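/*
 * Illustrative example of how the two floors combine (frequencies are
 * hypothetical, not taken from any target): if a migration sets
 * boost_min = 960000 and an input event sets input_boost_min = 1190400,
 * the adjust notifier above clamps policy->min to
 * max(960000, 1190400) = 1190400 kHz. Each floor is cleared
 * independently when its removal work fires, and the following
 * cpufreq_update_policy() call re-evaluates the policy without it.
 */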
static int boost_migration_should_run(unsigned int cpu)
{
	struct cpu_sync *s = &per_cpu(sync_info, cpu);

	return s->pending;
}

static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

	if (src_policy.min == src_policy.cpuinfo.min_freq) {
		pr_debug("No sync. Source CPU%d@%ukHz at min freq\n",
			 src_cpu, src_policy.cur);
		return;
	}

	cancel_delayed_work_sync(&s->boost_rem);
	if (sync_threshold)
		s->boost_min = min(sync_threshold, src_policy.cur);
	else
		s->boost_min = src_policy.cur;

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(src_cpu))
		/*
		 * Send an unchanged policy update to the source CPU. Even
		 * though the policy isn't changed from its existing boosted
		 * or non-boosted state, notifying the source CPU will let
		 * the governor know a boost happened on another CPU and
		 * that it should re-evaluate the frequency at the next
		 * timer event without interference from a min sample time.
		 */
		cpufreq_update_policy(src_cpu);

	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}

static struct smp_hotplug_thread cpuboost_threads = {
	.store			= &thread,
	.thread_should_run	= boost_migration_should_run,
	.thread_fn		= run_boost_migration,
	.thread_comm		= "boost_sync/%u",
};

static int boost_migration_notify(struct notifier_block *nb,
				unsigned long dest_cpu, void *arg)
{
	unsigned long flags;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);

	if (!boost_ms)
		return NOTIFY_OK;

	/* Avoid deadlock in try_to_wake_up() */
	if (per_cpu(thread, dest_cpu) == current)
		return NOTIFY_OK;

	pr_debug("Migration: CPU%d --> CPU%d\n",
		 (int)(unsigned long)arg, (int)dest_cpu);
	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = (int)(unsigned long)arg;
	spin_unlock_irqrestore(&s->lock, flags);
	/* Kick the destination CPU's sync thread to apply the boost. */
	wake_up_process(per_cpu(thread, dest_cpu));

	return NOTIFY_OK;
}

static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};

static void do_input_boost(struct work_struct *work)
{
	unsigned int i;
	int ret;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	get_online_cpus();
	for_each_online_cpu(i) {
		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		/* Skip CPUs already running at or above the boost freq. */
		if (policy.cur >= input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();
}

static void cpuboost_input_event(struct input_handle *handle,
		unsigned int type, unsigned int code, int value)
{
	u64 now;

	if (!input_boost_freq)
		return;

	now = ktime_to_us(ktime_get());
	if (now - last_input_time < MIN_INPUT_INTERVAL)
		return;

	if (work_pending(&input_boost_work))
		return;

	queue_work(cpu_boost_wq, &input_boost_work);
	last_input_time = ktime_to_us(ktime_get());
}
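/*
 * End-to-end input boost flow: an input event queues input_boost_work on
 * the high-priority workqueue, rate-limited to one boost per
 * MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC = 150 ms). do_input_boost()
 * then raises input_boost_min on every online CPU currently running
 * below input_boost_freq and schedules per-CPU removal work, so
 * input_boost_ms later do_input_boost_rem() drops the floor again.
 */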
static int cpuboost_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void cpuboost_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id cpuboost_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};

static struct input_handler cpuboost_input_handler = {
	.event		= cpuboost_input_event,
	.connect	= cpuboost_input_connect,
	.disconnect	= cpuboost_input_disconnect,
	.name		= "cpu-boost",
	.id_table	= cpuboost_ids,
};

static int cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);

	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
	}
	atomic_notifier_chain_register(&migration_notifier_head,
					&boost_migration_nb);
	ret = smpboot_register_percpu_thread(&cpuboost_threads);
	if (ret)
		pr_err("Cannot register cpuboost threads.\n");

	ret = input_register_handler(&cpuboost_input_handler);
	if (ret)
		pr_err("Cannot register cpuboost input handler.\n");

	return ret;
}
late_initcall(cpu_boost_init);
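/*
 * Runtime tuning sketch (paths assume this file is built in as
 * "cpu_boost"; the frequency values are hypothetical):
 *
 *   echo 1190400 > /sys/module/cpu_boost/parameters/input_boost_freq
 *   echo 40      > /sys/module/cpu_boost/parameters/input_boost_ms
 *   echo 20      > /sys/module/cpu_boost/parameters/boost_ms
 *   echo 960000  > /sys/module/cpu_boost/parameters/sync_threshold
 *
 * Writing 0 to input_boost_freq or boost_ms disables the input boost
 * or the migration sync boost, respectively.
 */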