path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c                    333
-rw-r--r--  kernel/cgroup.c                       2
-rw-r--r--  kernel/events/core.c                 41
-rw-r--r--  kernel/power/Kconfig                 12
-rw-r--r--  kernel/power/Makefile                 3
-rw-r--r--  kernel/power/main.c                   6
-rw-r--r--  kernel/power/suspend.c               41
-rw-r--r--  kernel/power/user_sysfs_private.c  1375
-rw-r--r--  kernel/printk/printk.c               68
-rw-r--r--  kernel/sched/Makefile                 1
-rw-r--r--  kernel/sched/core.c                   3
-rw-r--r--  kernel/sched/core_ctl.c            1115
-rw-r--r--  kernel/sched/qhmp_core.c             15
-rw-r--r--  kernel/time/alarmtimer.c              8
-rw-r--r--  kernel/trace/power-traces.c           2
15 files changed, 2827 insertions, 198 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index cc3416f0..3a6e0101 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -73,6 +73,7 @@
#include <linux/compat.h>
#include <linux/ctype.h>
#include <linux/string.h>
+#include <linux/uaccess.h>
#include <uapi/linux/limits.h>
#include "audit.h"
@@ -82,7 +83,8 @@
#define AUDITSC_SUCCESS 1
#define AUDITSC_FAILURE 2
-/* no execve audit message should be longer than this (userspace limits) */
+/* no execve audit message should be longer than this (userspace limits),
+ * see the note near the top of audit_log_execve_info() about this value */
#define MAX_EXECVE_AUDIT_LEN 7500
/* max length to print of cmdline/proctitle value during audit */
@@ -1010,185 +1012,178 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
return rc;
}
-/*
- * to_send and len_sent accounting are very loose estimates. We aren't
- * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
- * within about 500 bytes (next page boundary)
- *
- * why snprintf? an int is up to 12 digits long. if we just assumed when
- * logging that a[%d]= was going to be 16 characters long we would be wasting
- * space in every audit message. In one 7500 byte message we can log up to
- * about 1000 min size arguments. That comes down to about 50% waste of space
- * if we didn't do the snprintf to find out how long arg_num_len was.
- */
-static int audit_log_single_execve_arg(struct audit_context *context,
- struct audit_buffer **ab,
- int arg_num,
- size_t *len_sent,
- const char __user *p,
- char *buf)
+static void audit_log_execve_info(struct audit_context *context,
+ struct audit_buffer **ab)
{
- char arg_num_len_buf[12];
- const char __user *tmp_p = p;
- /* how many digits are in arg_num? 5 is the length of ' a=""' */
- size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
- size_t len, len_left, to_send;
- size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
- unsigned int i, has_cntl = 0, too_long = 0;
- int ret;
-
- /* strnlen_user includes the null we don't want to send */
- len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;
-
- /*
- * We just created this mm, if we can't find the strings
- * we just copied into it something is _very_ wrong. Similar
- * for strings that are too long, we should not have created
- * any.
- */
- if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
+ long len_max;
+ long len_rem;
+ long len_full;
+ long len_buf;
+ long len_abuf;
+ long len_tmp;
+ bool require_data;
+ bool encode;
+ unsigned int iter;
+ unsigned int arg;
+ char *buf_head;
+ char *buf;
+ const char __user *p = (const char __user *)current->mm->arg_start;
+
+ /* NOTE: this buffer needs to be large enough to hold all the non-arg
+ * data we put in the audit record for this argument (see the
+ * code below) ... at this point in time 96 is plenty */
+ char abuf[96];
+
+ /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
+ * current value of 7500 is not as important as the fact that it
+ * is less than 8k, a setting of 7500 gives us plenty of wiggle
+ * room if we go over a little bit in the logging below */
+ WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
+ len_max = MAX_EXECVE_AUDIT_LEN;
+
+ /* scratch buffer to hold the userspace args */
+ buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
+ if (!buf_head) {
+ audit_panic("out of memory for argv string");
+ return;
}
+ buf = buf_head;
- /* walk the whole argument looking for non-ascii chars */
+ audit_log_format(*ab, "argc=%d", context->execve.argc);
+
+ len_rem = len_max;
+ len_buf = 0;
+ len_full = 0;
+ require_data = true;
+ encode = false;
+ iter = 0;
+ arg = 0;
do {
- if (len_left > MAX_EXECVE_AUDIT_LEN)
- to_send = MAX_EXECVE_AUDIT_LEN;
- else
- to_send = len_left;
- ret = copy_from_user(buf, tmp_p, to_send);
- /*
- * There is no reason for this copy to be short. We just
- * copied them here, and the mm hasn't been exposed to user-
- * space yet.
- */
- if (ret) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
- }
- buf[to_send] = '\0';
- has_cntl = audit_string_contains_control(buf, to_send);
- if (has_cntl) {
- /*
- * hex messages get logged as 2 bytes, so we can only
- * send half as much in each message
- */
- max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
- break;
- }
- len_left -= to_send;
- tmp_p += to_send;
- } while (len_left > 0);
-
- len_left = len;
-
- if (len > max_execve_audit_len)
- too_long = 1;
-
- /* rewalk the argument actually logging the message */
- for (i = 0; len_left > 0; i++) {
- int room_left;
-
- if (len_left > max_execve_audit_len)
- to_send = max_execve_audit_len;
- else
- to_send = len_left;
-
- /* do we have space left to send this argument in this ab? */
- room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
- if (has_cntl)
- room_left -= (to_send * 2);
- else
- room_left -= to_send;
- if (room_left < 0) {
- *len_sent = 0;
- audit_log_end(*ab);
- *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
- if (!*ab)
- return 0;
- }
+ /* NOTE: we don't ever want to trust this value for anything
+ * serious, but the audit record format insists we
+ * provide an argument length for really long arguments,
+ * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
+ * to use strncpy_from_user() to obtain this value for
+ * recording in the log, although we don't use it
+ * anywhere here to avoid a double-fetch problem */
+ if (len_full == 0)
+ len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
+
+ /* read more data from userspace */
+ if (require_data) {
+ /* can we make more room in the buffer? */
+ if (buf != buf_head) {
+ memmove(buf_head, buf, len_buf);
+ buf = buf_head;
+ }
+
+ /* fetch as much as we can of the argument */
+ len_tmp = strncpy_from_user(&buf_head[len_buf], p,
+ len_max - len_buf);
+ if (len_tmp == -EFAULT) {
+ /* unable to copy from userspace */
+ send_sig(SIGKILL, current, 0);
+ goto out;
+ } else if (len_tmp == (len_max - len_buf)) {
+ /* buffer is not large enough */
+ require_data = true;
+ /* NOTE: if we are going to span multiple
+ * buffers force the encoding so we stand
+ * a chance at a sane len_full value and
+ * consistent record encoding */
+ encode = true;
+ len_full = len_full * 2;
+ p += len_tmp;
+ } else {
+ require_data = false;
+ if (!encode)
+ encode = audit_string_contains_control(
+ buf, len_tmp);
+ /* try to use a trusted value for len_full */
+ if (len_full < len_max)
+ len_full = (encode ?
+ len_tmp * 2 : len_tmp);
+ p += len_tmp + 1;
+ }
+ len_buf += len_tmp;
+ buf_head[len_buf] = '\0';
- /*
- * first record needs to say how long the original string was
- * so we can be sure nothing was lost.
- */
- if ((i == 0) && (too_long))
- audit_log_format(*ab, " a%d_len=%zu", arg_num,
- has_cntl ? 2*len : len);
-
- /*
- * normally arguments are small enough to fit and we already
- * filled buf above when we checked for control characters
- * so don't bother with another copy_from_user
- */
- if (len >= max_execve_audit_len)
- ret = copy_from_user(buf, p, to_send);
- else
- ret = 0;
- if (ret) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
+ /* length of the buffer in the audit record? */
+ len_abuf = (encode ? len_buf * 2 : len_buf + 2);
}
- buf[to_send] = '\0';
-
- /* actually log it */
- audit_log_format(*ab, " a%d", arg_num);
- if (too_long)
- audit_log_format(*ab, "[%d]", i);
- audit_log_format(*ab, "=");
- if (has_cntl)
- audit_log_n_hex(*ab, buf, to_send);
- else
- audit_log_string(*ab, buf);
-
- p += to_send;
- len_left -= to_send;
- *len_sent += arg_num_len;
- if (has_cntl)
- *len_sent += to_send * 2;
- else
- *len_sent += to_send;
- }
- /* include the null we didn't log */
- return len + 1;
-}
-static void audit_log_execve_info(struct audit_context *context,
- struct audit_buffer **ab)
-{
- int i, len;
- size_t len_sent = 0;
- const char __user *p;
- char *buf;
+ /* write as much as we can to the audit log */
+ if (len_buf > 0) {
+ /* NOTE: some magic numbers here - basically if we
+ * can't fit a reasonable amount of data into the
+ * existing audit buffer, flush it and start with
+ * a new buffer */
+ if ((sizeof(abuf) + 8) > len_rem) {
+ len_rem = len_max;
+ audit_log_end(*ab);
+ *ab = audit_log_start(context,
+ GFP_KERNEL, AUDIT_EXECVE);
+ if (!*ab)
+ goto out;
+ }
- p = (const char __user *)current->mm->arg_start;
+ /* create the non-arg portion of the arg record */
+ len_tmp = 0;
+ if (require_data || (iter > 0) ||
+ ((len_abuf + sizeof(abuf)) > len_rem)) {
+ if (iter == 0) {
+ len_tmp += snprintf(&abuf[len_tmp],
+ sizeof(abuf) - len_tmp,
+ " a%d_len=%lu",
+ arg, len_full);
+ }
+ len_tmp += snprintf(&abuf[len_tmp],
+ sizeof(abuf) - len_tmp,
+ " a%d[%d]=", arg, iter++);
+ } else
+ len_tmp += snprintf(&abuf[len_tmp],
+ sizeof(abuf) - len_tmp,
+ " a%d=", arg);
+ WARN_ON(len_tmp >= sizeof(abuf));
+ abuf[sizeof(abuf) - 1] = '\0';
+
+ /* log the arg in the audit record */
+ audit_log_format(*ab, "%s", abuf);
+ len_rem -= len_tmp;
+ len_tmp = len_buf;
+ if (encode) {
+ if (len_abuf > len_rem)
+ len_tmp = len_rem / 2; /* encoding */
+ audit_log_n_hex(*ab, buf, len_tmp);
+ len_rem -= len_tmp * 2;
+ len_abuf -= len_tmp * 2;
+ } else {
+ if (len_abuf > len_rem)
+ len_tmp = len_rem - 2; /* quotes */
+ audit_log_n_string(*ab, buf, len_tmp);
+ len_rem -= len_tmp + 2;
+ /* don't subtract the "2" because we still need
+ * to add quotes to the remaining string */
+ len_abuf -= len_tmp;
+ }
+ len_buf -= len_tmp;
+ buf += len_tmp;
+ }
- audit_log_format(*ab, "argc=%d", context->execve.argc);
+ /* ready to move to the next argument? */
+ if ((len_buf == 0) && !require_data) {
+ arg++;
+ iter = 0;
+ len_full = 0;
+ require_data = true;
+ encode = false;
+ }
+ } while (arg < context->execve.argc);
- /*
- * we need some kernel buffer to hold the userspace args. Just
- * allocate one big one rather than allocating one of the right size
- * for every single argument inside audit_log_single_execve_arg()
- * should be <8k allocation so should be pretty safe.
- */
- buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
- if (!buf) {
- audit_panic("out of memory for argv string");
- return;
- }
+ /* NOTE: the caller handles the final audit_log_end() call */
- for (i = 0; i < context->execve.argc; i++) {
- len = audit_log_single_execve_arg(context, ab, i,
- &len_sent, p, buf);
- if (len <= 0)
- break;
- p += len;
- }
- kfree(buf);
+out:
+ kfree(buf_head);
}
static void show_special(struct audit_context *context, int *call_panic)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bf2543b6..359a50e6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5566,7 +5566,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct task_struct *task;
int count = 0;
- seq_printf(seq, "css_set %p\n", cset);
+ seq_printf(seq, "css_set %pK\n", cset);
list_for_each_entry(task, &cset->tasks, cg_list) {
if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7ee39bfa..3cea606d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -171,9 +171,12 @@ static struct srcu_struct pmus_srcu;
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
+ * 3 - disallow all unpriv perf event use
*/
#ifdef CONFIG_PERF_EVENTS_USERMODE
int sysctl_perf_event_paranoid __read_mostly = -1;
+#elif defined CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
#else
int sysctl_perf_event_paranoid __read_mostly = 1;
#endif
@@ -5728,9 +5731,6 @@ struct swevent_htable {
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
-
- /* Keeps track of cpu being initialized/exited */
- bool online;
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5977,14 +5977,8 @@ static int perf_swevent_add(struct perf_event *event, int flags)
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
- if (!head) {
- /*
- * We can race with cpu hotplug code. Do not
- * WARN if the cpu just got unplugged.
- */
- WARN_ON_ONCE(swhash->online);
+ if (WARN_ON_ONCE(!head))
return -EINVAL;
- }
hlist_add_head_rcu(&event->hlist_entry, head);
@@ -6051,7 +6045,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
int err = 0;
mutex_lock(&swhash->hlist_mutex);
-
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
@@ -6813,7 +6806,6 @@ skip_type:
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
- cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
__perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7311,6 +7303,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
@@ -7458,7 +7453,19 @@ SYSCALL_DEFINE5(perf_event_open,
* task or CPU context:
*/
if (move_group) {
- if (group_leader->ctx->type != ctx->type)
+ /*
+ * Make sure we're both on the same task, or both
+ * per-cpu events.
+ */
+ if (group_leader->ctx->task != ctx->task)
+ goto err_context;
+
+ /*
+ * Make sure we're both events for the same CPU;
+ * grouping events for different CPUs is broken; since
+ * you can never concurrently schedule them anyhow.
+ */
+ if (group_leader->cpu != event->cpu)
goto err_context;
} else {
if (group_leader->ctx != ctx)
@@ -8173,7 +8180,6 @@ static void perf_event_init_cpu(int cpu)
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
- swhash->online = true;
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
@@ -8273,14 +8279,7 @@ static void perf_event_start_swclock(int cpu)
static void perf_event_exit_cpu(int cpu)
{
- struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
perf_event_exit_cpu_context(cpu);
-
- mutex_lock(&swhash->hlist_mutex);
- swhash->online = false;
- swevent_hlist_release(swhash);
- mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 67ecc6b5..dbb6a548 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -325,3 +325,15 @@ config SUSPEND_TIME
Prints the time spent in suspend in the kernel log, and
keeps statistics on the time spent in suspend in
/sys/kernel/debug/suspend_time
+#lenovo.sw chenyb1 20150310 add for pm log begin
+config PM_SYNC_BEFORE_SUSPEND
+ bool "Sync file systems before suspend"
+ depends on PM
+ default y
+config LENOVO_PM_LOG
+ def_bool y
+
+config LENOVO_PM_LOG_TLMM
+ def_bool n
+ depends on LENOVO_PM_LOG
+#lenovo.sw chenyb1 20150310 add for pm log end
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 74c713ba..9c804658 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -16,3 +16,6 @@ obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
obj-$(CONFIG_SUSPEND) += wakeup_reason.o
+# Add user defined sysfs lenovo.sw chenyb1 20150310 add for pm log begin
+obj-$(CONFIG_LENOVO_PM_LOG) += user_sysfs_private.o
+#lenovo.sw chenyb1 20150310 add for pm log end
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 9a59d042..7928e64f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -463,7 +463,11 @@ static ssize_t autosleep_store(struct kobject *kobj,
{
suspend_state_t state = decode_state(buf, n);
int error;
-
+//chenyb1, 20140113, Add to show sleep enter state, START
+#ifdef CONFIG_LENOVO_PM_LOG
+ printk("%s(), state=%d, buf=%s, n=%d\n", __func__, state, buf,(int)n);
+#endif //CONFIG_LENOVO_PM_LOG
+//chenyb1, 20140113, Add to show sleep enter state, END
if (state == PM_SUSPEND_ON
&& strcmp(buf, "off") && strcmp(buf, "off\n"))
return -EINVAL;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index a212348c..b1af1cda 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -271,6 +271,13 @@ void __weak arch_suspend_enable_irqs(void)
*
* This function should be called after devices have been suspended.
*/
+/*lenovo.sw begin chenyb1, 20130516, Add for sysfs tlmm_before_sleep */
+#ifdef CONFIG_LENOVO_PM_LOG
+extern void vreg_before_sleep_save_configs(void);
+extern void tlmm_before_sleep_set_configs(void);
+extern void tlmm_before_sleep_save_configs(void);
+#endif//#ifdef CONFIG_LENOVO_PM_LOG
+/*lenovo.sw end chenyb1, 20130516, Add for sysfs tlmm_before_sleep */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
char suspend_abort[MAX_SUSPEND_ABORT_LEN];
@@ -322,6 +329,16 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
goto Platform_wake;
}
+ /*lenovo.sw begin chenyb1, 20130516, Add for sysfs tlmm_before_sleep. */
+#ifdef CONFIG_LENOVO_PM_LOG
+ vreg_before_sleep_save_configs();
+#if 1 //TBD
+ tlmm_before_sleep_set_configs();
+ tlmm_before_sleep_save_configs();
+#endif
+#endif//#ifdef CONFIG_LENOVO_PM_LOG
+ /*lenovo.sw end chenyb1, 20130516, Add for sysfs tlmm_before_sleep. */
+
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS)) {
log_suspend_abort_reason("Disabling non-boot cpus failed");
@@ -463,11 +480,19 @@ static int enter_state(suspend_state_t state)
if (state == PM_SUSPEND_FREEZE)
freeze_begin();
+/* chenyb1 add to disable sys_sync in suspend begin
+ * to avoid very long delays that block suspend and trip the Android WDT;
+ * no need to do the sync on a built-in-battery phone, the mem won't be shut down;
+ * low-speed emmc or external storage can trigger such long delays
+ */
+#ifdef CONFIG_PM_SYNC_BEFORE_SUSPEND
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
+#endif
+/* chenyb1 add to disable sys_sync in suspend end */
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare(state);
@@ -510,6 +535,12 @@ static void pm_suspend_marker(char *annotation)
* Check if the value of @state represents one of the supported states,
* execute enter_state() and update system suspend statistics.
*/
+/* chenyb1, 20130524, Add sleeplog, START */
+#ifdef CONFIG_LENOVO_PM_LOG
+extern void log_suspend_enter(void);
+extern void log_suspend_exit(int error);
+#endif //#ifdef CONFIG_LENOVO_PM_LOG
+/* chenyb1, 20130524, Add sleeplog, END */
int pm_suspend(suspend_state_t state)
{
int error;
@@ -518,6 +549,11 @@ int pm_suspend(suspend_state_t state)
return -EINVAL;
pm_suspend_marker("entry");
+ /* chenyb1, 20130524, Add sleeplog START*/
+#ifdef CONFIG_LENOVO_PM_LOG
+ log_suspend_enter();
+#endif
+ /* chenyb1, 20130524, Add sleeplog END*/
error = enter_state(state);
if (error) {
suspend_stats.fail++;
@@ -525,6 +561,11 @@ int pm_suspend(suspend_state_t state)
} else {
suspend_stats.success++;
}
+ /* chenyb1, 20130524, Add sleeplog START*/
+#ifdef CONFIG_LENOVO_PM_LOG
+ log_suspend_exit(error);
+#endif //#ifdef CONFIG_LENOVO_PM_LOG
+ /* chenyb1, 20130524, Add sleeplog END*/
pm_suspend_marker("exit");
return error;
}
diff --git a/kernel/power/user_sysfs_private.c b/kernel/power/user_sysfs_private.c
new file mode 100644
index 00000000..e226d55f
--- /dev/null
+++ b/kernel/power/user_sysfs_private.c
@@ -0,0 +1,1375 @@
+/*
+* Copyright (C) 2012 lenovo, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+*/
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h> /* sys_sync */
+#include <linux/rtc.h> /* sys_sync */
+/* yangjq, 2011-12-16, Add for vreg, START */
+#include <linux/platform_device.h>
+/* yangjq, 2011-12-16, Add for vreg, END */
+#include <linux/err.h>
+////#include <mach/pmic.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+
+static u32 *tz_config = NULL;
+static int tz_pin_num = 0;
+static struct kobject *sysfs_private_kobj;
+
+#define GPIO_CFG(gpio, func, dir, pull, drvstr) \
+ ((((gpio) & 0x3FF) << 4) | \
+ ((func) & 0xf) | \
+ (((dir) & 0x1) << 14) | \
+ (((pull) & 0x3) << 15) | \
+ (((drvstr) & 0xF) << 17))
+
+/* GP PIN TYPE REG MASKS */
+#define TLMM_GP_DRV_SHFT 6
+#define TLMM_GP_DRV_MASK 0x7
+#define TLMM_GP_PULL_SHFT 0
+#define TLMM_GP_PULL_MASK 0x3
+#define TLMM_GP_DIR_SHFT 9
+#define TLMM_GP_DIR_MASK 1
+#define TLMM_GP_FUNC_SHFT 2
+#define TLMM_GP_FUNC_MASK 0xF
+#define GPIO_OUT_BIT 1
+#define GPIO_IN_BIT 0
+#define GPIO_OE_BIT 9
+/**
+ * extract GPIO pin from bit-field used for gpio_tlmm_config
+ */
+#define GPIO_PIN(gpio_cfg) (((gpio_cfg) >> 4) & 0x3ff)
+#define GPIO_FUNC(gpio_cfg) (((gpio_cfg) >> 0) & 0xf)
+#define GPIO_DIR(gpio_cfg) (((gpio_cfg) >> 14) & 0x1)
+#define GPIO_PULL(gpio_cfg) (((gpio_cfg) >> 15) & 0x3)
+#define GPIO_DRVSTR(gpio_cfg) (((gpio_cfg) >> 17) & 0xf)
+#define TLMM_GP_CFG(reg_base, pin) (reg_base + 0x0 + \
+ 0x1000* (pin))
+#define TLMM_GP_INOUT(reg_base, pin) (reg_base + 0x4 + \
+ 0x1000 * (pin))
+/* chenyb1, 20130515, Add sysfs for gpio's debug, START */
+#define TLMM_NUM_GPIO 141
+
+#define HAL_OUTPUT_VAL(config) \
+ (((config)&0x40000000)>>30)
+
+extern void *tlmm_reg_base;
+static int tlmm_get_cfg(unsigned gpio, unsigned* cfg)
+{
+ unsigned flags;
+
+ if(tlmm_reg_base == NULL)
+ return -1;
+ BUG_ON(gpio >= TLMM_NUM_GPIO);
+ //printk("%s(), gpio=%d, addr=0x%08x\n", __func__, gpio, (unsigned int)GPIO_CONFIG(gpio));
+
+#if 0
+ flags = ((GPIO_DIR(config) << 9) & (0x1 << 9)) |
+ ((GPIO_DRVSTR(config) << 6) & (0x7 << 6)) |
+ ((GPIO_FUNC(config) << 2) & (0xf << 2)) |
+ ((GPIO_PULL(config) & 0x3));
+#else
+ flags = readl_relaxed(TLMM_GP_CFG(tlmm_reg_base, gpio));
+#endif
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+ *cfg = GPIO_CFG(gpio, (flags >> TLMM_GP_FUNC_SHFT) & 0xf, (flags >> TLMM_GP_DIR_SHFT) & 0x1, flags & 0x3, (flags >> TLMM_GP_DRV_SHFT) & 0x7);
+
+ return 0;
+}
+
+int tlmm_set_config(unsigned config)
+{
+ unsigned int flags;
+ unsigned gpio = GPIO_PIN(config);
+ void __iomem *cfg_reg = TLMM_GP_CFG(tlmm_reg_base, gpio );
+
+ if(tlmm_reg_base == NULL)
+ return -1;
+ if (gpio > TLMM_NUM_GPIO)
+ return -EINVAL;
+
+ printk("%s(), %d,gpio=%d\n", __func__, __LINE__, gpio);
+
+ config = (config & ~0x40000000);
+ flags = readl_relaxed(cfg_reg);
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+
+ flags = ((GPIO_DIR(config) & TLMM_GP_DIR_MASK) << TLMM_GP_DIR_SHFT) |
+ ((GPIO_DRVSTR(config) & TLMM_GP_DRV_MASK) << TLMM_GP_DRV_SHFT) |
+ ((GPIO_FUNC(config) & TLMM_GP_FUNC_MASK) << TLMM_GP_FUNC_SHFT) |
+ ((GPIO_PULL(config) & TLMM_GP_PULL_MASK));
+
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+ writel_relaxed(flags, cfg_reg);
+
+#if 0
+ /*set func*/
+ cfg_reg = TLMMV4_GP_CFG(tlmm_reg_base, gpio);
+ flags = readl_relaxed(cfg_reg);
+ flags &= ~(TLMMV4_GP_FUNC_MASK << TLMMV4_GP_FUNC_SHFT);
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+
+ flags |= (GPIO_FUNC(config) << TLMMV4_GP_FUNC_SHFT);
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+ writel_relaxed(flags, cfg_reg);
+
+ /* set DIR */
+ cfg_reg = TLMMV4_GP_CFG(tlmm_reg_base, gpio);
+ flags = readl_relaxed(cfg_reg);
+ if (GPIO_DIR(config))
+ {
+ flags |= BIT(GPIO_OE_BIT);
+ }
+ else
+ {
+ flags &= ~BIT(GPIO_OE_BIT);
+ }
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+ writel_relaxed(flags, cfg_reg);
+
+ /* set PULL */
+ flags = readl_relaxed(cfg_reg);
+ flags |= GPIO_PULL(config) & 0x3;
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+ writel_relaxed(flags, cfg_reg);
+
+ /* set DRVSTR */
+ flags = readl_relaxed(cfg_reg);
+ flags |= drv_str_to_rval(GPIO_DRVSTR(config));
+ printk("%s(), %d, flags=%x\n", __func__, __LINE__, flags);
+ writel_relaxed(flags, cfg_reg);
+#endif
+ return 0;
+}
+static int tlmm_dump_cfg(char* buf,unsigned gpio, unsigned cfg, int output_val)
+{
+ static char* drvstr_str[] = { "2", "4", "6", "8", "10", "12", "14", "16" }; // mA
+ static char* pull_str[] = { "N", "D", "K", "U" }; // "NO_PULL", "PULL_DOWN", "KEEPER", "PULL_UP"
+ static char* dir_str[] = { "I", "O" }; // "Input", "Output"
+ char func_str[20];
+
+ char* p = buf;
+
+ int drvstr = GPIO_DRVSTR(cfg);
+ int pull = GPIO_PULL(cfg);
+ int dir = GPIO_DIR(cfg);
+ int func = GPIO_FUNC(cfg);
+
+ //printk("%s(), drvstr=%d, pull=%d, dir=%d, func=%d\n", __func__, drvstr, pull, dir, func);
+ sprintf(func_str, "%d", func);
+
+ p += sprintf(p, "%d:0x%x %s%s%s%s", gpio, cfg,
+ func_str, pull_str[pull], dir_str[dir], drvstr_str[drvstr]);
+
+ p += sprintf(p, " = %d", output_val);
+
+ p += sprintf(p, "\n");
+
+ return p - buf;
+}
+
+static int tlmm_dump_header(char* buf)
+{
+ char* p = buf;
+ p += sprintf(p, "bit 0~3: function. (0 is GPIO)\n");
+ p += sprintf(p, "bit 4~13: gpio number\n");
+ p += sprintf(p, "bit 14: 0: input, 1: output\n");
+ p += sprintf(p, "bit 15~16: pull: NO_PULL, PULL_DOWN, KEEPER, PULL_UP\n");
+ p += sprintf(p, "bit 17~20: driver strength. \n");
+ p += sprintf(p, "0:GPIO\n");
+ p += sprintf(p, "N:NO_PULL D:PULL_DOWN K:KEEPER U:PULL_UP\n");
+ p += sprintf(p, "I:Input O:Output\n");
+ p += sprintf(p, "2:2, 4, 6, 8, 10, 12, 14, 16 mA (driver strength)\n\n");
+ return p - buf;
+}
+
+static int tlmm_get_inout(unsigned gpio)
+{
+ void __iomem *inout_reg = TLMM_GP_INOUT(tlmm_reg_base, gpio);
+
+ if(tlmm_reg_base == NULL)
+ return -1;
+ return readl_relaxed(inout_reg) & BIT(GPIO_IN_BIT);
+}
+
+void tlmm_set_inout(unsigned gpio, unsigned val)
+{
+ void __iomem *inout_reg = TLMM_GP_INOUT(tlmm_reg_base, gpio);
+
+ if(tlmm_reg_base == NULL)
+ return;
+ writel_relaxed(val ? BIT(GPIO_OUT_BIT) : 0, inout_reg);
+}
+
+int tlmm_dump_info(char* buf, int tlmm_num)
+{
+ unsigned i, j;
+ char* p = buf;
+ unsigned cfg;
+ int output_val = 0;
+ int tz_flag = 0;
+
+ if(tlmm_num >= 0 && tlmm_num < TLMM_NUM_GPIO) {
+ tlmm_get_cfg(tlmm_num, &cfg);
+ output_val = tlmm_get_inout(tlmm_num);
+
+ p += tlmm_dump_cfg(p, tlmm_num, cfg, output_val);
+ } else {
+
+ p += tlmm_dump_header(p);
+ p += sprintf(p, "Standard Format: gpio_num function pull direction strength [output_value]\n");
+ p += sprintf(p, "Shortcut Format: gpio_num output_value\n");
+ p += sprintf(p, " e.g. 'echo 20 0 D O 2 1' ==> set pin 20 as GPIO output and the output = 1 \n");
+ p += sprintf(p, " e.g. 'echo 20 1' ==> set output gpio pin 20 output = 1 \n");
+ printk("%s(), %d, TLMM_BASE=%lx\n", __func__, __LINE__, (unsigned long int)(void *)tlmm_reg_base);
+ for(i = 0; i < TLMM_NUM_GPIO; ++i) {
+ for(j = 0; j < tz_pin_num; j++) {
+ if(i == tz_config[j]) {
+ tz_flag = 1;
+ continue;
+ }
+ }
+ if(tz_flag == 1) {
+ tz_flag = 0;
+ continue;
+ }
+ tlmm_get_cfg(i, &cfg);
+ output_val = tlmm_get_inout(i);
+
+ p += tlmm_dump_cfg(p, i, cfg, output_val);
+ }
+ printk("%s(), %d\n", __func__, __LINE__);
+ p+= sprintf(p, "(%ld)\n", (unsigned long)(p - buf)); // only for debug reference
+ }
+ return p - buf;
+}
+
+/* save tlmm config before sleep */
+static int before_sleep_fetched;
+module_param(before_sleep_fetched,int,0644);
+static unsigned before_sleep_configs[TLMM_NUM_GPIO];
+void tlmm_before_sleep_save_configs(void)
+{
+ unsigned i;
+
+ //only save tlmm configs when it has been fetched
+ if (!before_sleep_fetched)
+ return;
+
+ printk("%s(), before_sleep_fetched=%d\n", __func__, before_sleep_fetched);
+ before_sleep_fetched = false;
+ for(i = 0; i < TLMM_NUM_GPIO; ++i) {
+ unsigned cfg;
+ int output_val = 0;
+
+ tlmm_get_cfg(i, &cfg);
+ output_val = tlmm_get_inout(i);
+
+ before_sleep_configs[i] = cfg | (output_val << 30);
+ }
+}
+
+int tlmm_before_sleep_dump_info(char* buf)
+{
+ unsigned i;
+ char* p = buf;
+
+ p += sprintf(p, "tlmm_before_sleep:\n");
+ if (!before_sleep_fetched) {
+ before_sleep_fetched = true;
+
+ p += tlmm_dump_header(p);
+
+ for(i = 0; i < TLMM_NUM_GPIO; ++i) {
+ unsigned cfg;
+ int output_val = 0;
+
+ cfg = before_sleep_configs[i];
+ output_val = HAL_OUTPUT_VAL(cfg);
+ //cfg &= ~0x40000000;
+ p += tlmm_dump_cfg(p, i, cfg, output_val);
+ }
+ p+= sprintf(p, "(%ld)\n", (unsigned long)(p - buf)); // only for debug reference
+ }
+ return p - buf;
+}
+
+/* set tlmms config before sleep */
+static int before_sleep_table_enabled = 0;
+module_param(before_sleep_table_enabled,int,0644);
+static unsigned before_sleep_table_configs[TLMM_NUM_GPIO];
+void tlmm_before_sleep_set_configs(void)
+{
+ int res;
+ unsigned i;
+
+ //only set tlmms before sleep when it's enabled
+ if (!before_sleep_table_enabled)
+ return;
+
+ printk("%s(), before_sleep_table_enabled=%d\n", __func__, before_sleep_table_enabled);
+ for(i = 0; i < TLMM_NUM_GPIO; ++i) {
+ unsigned cfg;
+ int gpio;
+ int dir;
+ int func;
+ int output_val = 0;
+
+ cfg = before_sleep_table_configs[i];
+
+ gpio = GPIO_PIN(cfg);
+ if(gpio != i)//(cfg & ~0x20000000) == 0 ||
+ continue;
+
+ output_val = HAL_OUTPUT_VAL(cfg);
+ //Clear the output value
+ //cfg &= ~0x40000000;
+ dir = GPIO_DIR(cfg);
+ func = GPIO_FUNC(cfg);
+
+ printk("%s(), [%d]: 0x%x\n", __func__, i, cfg);
+ res = tlmm_set_config(cfg & ~0x40000000);
+ if(res < 0) {
+ printk("Error: Config failed.\n");
+ }
+
+ if((func == 0) && (dir == 1)) // gpio output
+ tlmm_set_inout(i, output_val);
+ }
+}
+
+int tlmm_before_sleep_table_set_cfg(unsigned gpio, unsigned cfg)
+{
+ //BUG_ON(gpio >= TLMM_NUM_GPIO && GPIO_PIN(cfg) != 0xff);
+ if (gpio >= TLMM_NUM_GPIO && gpio != 255 && gpio != 256) {
+ printk("gpio >= TLMM_NUM_GPIO && gpio != 255 && gpio != 256!\n");
+ return -1;
+ }
+
+ if(gpio < TLMM_NUM_GPIO)
+ {
+ before_sleep_table_configs[gpio] = cfg;// | 0x20000000
+ before_sleep_table_enabled = true;
+ }
+ else if(gpio == 255)
+ before_sleep_table_enabled = true;
+ else if(gpio == 256)
+ before_sleep_table_enabled = false;
+
+ return 0;
+}
+
+int tlmm_before_sleep_table_dump_info(char* buf)
+{
+ unsigned i;
+ char* p = buf;
+
+ p += tlmm_dump_header(p);
+ p += sprintf(p, "Format: gpio_num function pull direction strength [output_value]\n");
+ p += sprintf(p, " e.g. 'echo 20 0 D O 2 1' ==> set pin 20 as GPIO output and the output = 1 \n");
+ p += sprintf(p, " e.g. 'echo 20' ==> disable pin 20's setting \n");
+ p += sprintf(p, " e.g. 'echo 255' ==> enable sleep table's setting \n");
+ p += sprintf(p, " e.g. 'echo 256' ==> disable sleep table's setting \n");
+
+ for(i = 0; i < TLMM_NUM_GPIO; ++i) {
+ unsigned cfg;
+ int output_val = 0;
+
+ cfg = before_sleep_table_configs[i];
+ output_val = HAL_OUTPUT_VAL(cfg);
+ //cfg &= ~0x40000000;
+ p += tlmm_dump_cfg(p, i, cfg, output_val);
+ }
+ p+= sprintf(p, "(%ld)\n", (unsigned long)(p - buf)); // only for debug reference
+ return p - buf;
+}
+/* yangjq, 20130515, Add sysfs for gpio's debug, END */
+
+
+#define private_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0644, \
+ }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
+
+#ifdef CONFIG_LENOVO_PM_LOG_TLMM//TBD
+//chenyb1, 2015-2-3, Add a sysfs interface for modem's sim card checking, START
+#define TLMM_GPIO_SIM 60
+static ssize_t tlmm_sim_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *p = buf;
+ int output_val = 0;
+
+ output_val = tlmm_get_inout(TLMM_GPIO_SIM);
+ p += sprintf(p, "%d", output_val);
+
+ return (p - buf);
+}
+
+static ssize_t tlmm_sim_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support yet.\n", __func__);
+
+ return -EPERM;
+}
+//yangjq, 2015-2-3, Add a sysfs interface for modem's sim card checking, END
+
+static int tlmm_num = -1;
+static ssize_t tlmm_num_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char* p = buf;
+ p += sprintf(p, "A single gpio[0, %d] to be checked by cat tlmm\n", TLMM_NUM_GPIO);
+ p += sprintf(p, "-1 to check all %d gpios by cat tlmm\n", TLMM_NUM_GPIO+1);
+ p += sprintf(p, "%d\n", tlmm_num);
+ p += sprintf(p, "TLMM_BASE=%lx\n", (unsigned long int)(void *)tlmm_reg_base);
+
+ printk("%s(), %d, TLMM_BASE=%lx\n", __func__, __LINE__, (unsigned long int)(void *)tlmm_reg_base);
+ return p - buf;
+}
+
+static ssize_t tlmm_num_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int gpio;
+ int res;
+
+ res = sscanf(buf, "%d", &gpio);
+ printk("res=%d. %d\n", res, gpio);
+
+ if(res != 1)
+ goto tlmm_num_store_wrong_para;
+
+ if(gpio >= TLMM_NUM_GPIO)
+ goto tlmm_num_store_wrong_para;
+
+ tlmm_num = gpio;
+ printk("tlmm_num: %d\n", tlmm_num);
+
+ goto tlmm_num_store_ok;
+
+tlmm_num_store_wrong_para:
+ printk("Wrong Input.\n");
+ printk("Format: gpio_num\n");
+ printk(" gpio_num: 0 ~ 145\n");
+
+tlmm_num_store_ok:
+ return n;
+}
+
+static ssize_t tlmm_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char* p = buf;
+#if 1 //TBD
+ p += tlmm_dump_info(buf, tlmm_num);
+#endif
+ return p - buf;
+}
+
+static ssize_t tlmm_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ char pull_c, dir_c;
+ int gpio, func, pull, dir, drvstr, output_val;
+ unsigned cfg;
+ int res;
+
+ res = sscanf(buf, "%d %d %c %c %d %d", &gpio, &func, &pull_c, &dir_c, &drvstr, &output_val);
+ printk("res=%d. %d %d %c %c %d %d\n", res, gpio, func, pull_c, dir_c, drvstr, output_val);
+
+ //Add a shortcut wrting format to change an output gpio's value
+ if(res == 2 && gpio < TLMM_NUM_GPIO && (func == 0 || func == 1)) {
+ output_val = func;
+ goto tlmm_store_only_output_val;
+ }
+ if((res != 5) && (res != 6))
+ goto tlmm_store_wrong_para;
+
+ if(gpio >= TLMM_NUM_GPIO)
+ goto tlmm_store_wrong_para;
+
+ if('N' == pull_c)
+ pull = 0;
+ else if('D' == pull_c)
+ pull = 1;
+ else if('K' == pull_c)
+ pull = 2;
+ else if('U' == pull_c)
+ pull = 3;
+ else
+ goto tlmm_store_wrong_para;
+
+ if('I' == dir_c)
+ dir = 0;
+ else if('O' == dir_c)
+ dir = 1;
+ else
+ goto tlmm_store_wrong_para;
+
+ drvstr = drvstr/2 - 1; // 2mA -> 0, 4mA -> 1, 6mA -> 2, ...
+ if(drvstr > 7)
+ goto tlmm_store_wrong_para;
+
+ if(output_val > 1)
+ goto tlmm_store_wrong_para;
+
+ printk("final set: %d %d %d %d %d %d\n", gpio, func, pull, dir, drvstr, output_val);
+
+ cfg = GPIO_CFG(gpio, func, dir, pull, drvstr);
+#if 1
+ res = tlmm_set_config(cfg);
+ if(res < 0) {
+ printk("Error: Config failed.\n");
+ goto tlmm_store_wrong_para;
+ }
+#endif
+ printk("final set: %d %d %d %d %d %d\n", gpio, func, pull, dir, drvstr, output_val);
+ if((func == 0) && (dir == 1)) // gpio output
+tlmm_store_only_output_val:
+ tlmm_set_inout(gpio, output_val);
+
+ goto tlmm_store_ok;
+
+tlmm_store_wrong_para:
+ printk("Wrong Input.\n");
+ printk("Standard Format: gpio_num function pull direction strength [output_value]\n");
+ printk("Shortcut Format: gpio_num output_value\n");
+ printk(" gpio_num: 0 ~ 145\n");
+ printk(" function: number, where 0 is GPIO\n");
+ printk(" pull: 'N': NO_PULL, 'D':PULL_DOWN, 'K':KEEPER, 'U': PULL_UP\n");
+ printk(" direction: 'I': Input, 'O': Output\n");
+ printk(" strength: 2, 4, 6, 8, 10, 12, 14, 16\n");
+ printk(" output_value: Optional. 0 or 1. vaild if GPIO output\n");
+ printk(" e.g. 'echo 20 0 D I 2' ==> set pin 20 as GPIO input \n");
+ printk(" e.g. 'echo 20 0 D O 2 1' ==> set pin 20 as GPIO output and the output = 1 \n");
+ printk(" e.g. 'echo 20 1' ==> set output gpio pin 20 output = 1 \n");
+
+tlmm_store_ok:
+ return n;
+}
+
+/* Set GPIO's sleep config from sysfs */
+static ssize_t tlmm_before_sleep_table_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char* p = buf;
+#if 1 //TBD
+ p += tlmm_before_sleep_table_dump_info(buf);
+#endif
+ return p - buf;
+}
+
+static ssize_t tlmm_before_sleep_table_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ char pull_c, dir_c;
+ int gpio, func = 0, pull = 0, dir = 0, drvstr = 0, output_val = 0;
+ int ignore;
+ unsigned cfg;
+ int res;
+
+ res = sscanf(buf, "%d %d %c %c %d %d", &gpio, &func, &pull_c, &dir_c, &drvstr, &output_val);
+ printk("res=%d. %d %d %c %c %d %d\n", res, gpio, func, pull_c, dir_c, drvstr, output_val);
+
+ if(1 == res) { // if only gpio, means ignore (disable) the gpio's sleep config
+ ignore = 1;
+ printk("final set: to disable gpio %d sleep config\n", gpio);
+ }
+ else {
+ ignore = 0;
+
+ if((res != 5) && (res != 6))
+ goto tlmm_before_sleep_table_store_wrong_para;
+
+ if(gpio >= TLMM_NUM_GPIO)
+ goto tlmm_before_sleep_table_store_wrong_para;
+
+ if('N' == pull_c)
+ pull = 0;
+ else if('D' == pull_c)
+ pull = 1;
+ else if('K' == pull_c)
+ pull = 2;
+ else if('U' == pull_c)
+ pull = 3;
+ else
+ goto tlmm_before_sleep_table_store_wrong_para;
+
+ if('I' == dir_c)
+ dir = 0;
+ else if('O' == dir_c)
+ dir = 1;
+ else
+ goto tlmm_before_sleep_table_store_wrong_para;
+
+ drvstr = drvstr/2 - 1; // 2mA -> 0, 4mA -> 1, 6mA -> 2, ...
+ if(drvstr > 7)
+ goto tlmm_before_sleep_table_store_wrong_para;
+
+ printk("final set: %d %d %d %d %d\n", gpio, func, pull, dir, drvstr);
+ }
+
+ cfg = GPIO_CFG(ignore ? 0xff : gpio, func, dir, pull, drvstr);
+#if 1 //TBD
+ res = tlmm_before_sleep_table_set_cfg(gpio, cfg | (output_val << 30));
+ if(res < 0) {
+ printk("Error: Config failed.\n");
+ goto tlmm_before_sleep_table_store_wrong_para;
+ }
+#endif
+
+ goto tlmm_before_sleep_table_store_ok;
+
+tlmm_before_sleep_table_store_wrong_para:
+ printk("Wrong Input.\n");
+ printk("Format: refer to tlmm's format except 'echo gpio_num > xxx' to disable the gpio's setting\n");
+
+tlmm_before_sleep_table_store_ok:
+ return n;
+}
+
+extern int tlmm_before_sleep_dump_info(char* buf);
+static ssize_t tlmm_before_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char* p = buf;
+#if 1 //TBD
+ p += tlmm_before_sleep_dump_info(buf);
+#endif
+ return p - buf;
+}
+
+static ssize_t tlmm_before_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support.\n", __func__);
+ return n;
+}
+#endif
+extern int vreg_dump_info(char* buf);
+static ssize_t vreg_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char* p = buf;
+ p += vreg_dump_info(buf);
+ return p - buf;
+}
+
+//extern void vreg_config(struct vreg *vreg, unsigned on, unsigned mv);
+#if 0
+extern void regulator_config(struct regulator *reg, unsigned on, unsigned mv);
+#endif
+static ssize_t vreg_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support.\n", __func__);
+ return n;
+}
+
+extern int vreg_before_sleep_dump_info(char* buf);
+static ssize_t vreg_before_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char* p = buf;
+ p += vreg_before_sleep_dump_info(buf);
+ return p - buf;
+}
+
+static ssize_t vreg_before_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support.\n", __func__);
+ return n;
+}
+
+static ssize_t clk_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+#if 0
+ extern int clk_dump_info(char* buf);
+#endif //0
+ char *s = buf;
+
+ // show all enabled clocks
+#if 0
+ //s += sprintf(s, "\nEnabled Clocks:\n");
+ s += clk_dump_info(s);
+#else
+ //Use interface /sys/kernel/debug/clk/enabled_clocks provided by krait instead
+ s += sprintf(s, "cat /sys/kernel/debug/clk/enabled_clocks to show Enabled Clocks\n");
+#endif //0
+
+ return (s - buf);
+}
+
+static ssize_t clk_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support.\n", __func__);
+
+ return -EPERM;
+}
+/* chenyb1 add thermal config for benchmark 20150612 begin*/
+unsigned int thermal_bm_flag = 0;
+static ssize_t thermal_bm_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ printk(KERN_ERR "%s,thermal_bm_flag=%d\n", __func__, thermal_bm_flag);
+
+ return snprintf(buf, 10, "%d\n", thermal_bm_flag);
+}
+
+static ssize_t thermal_bm_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ const char *s = buf;
+
+ thermal_bm_flag = s[0] - '0';
+ sysfs_notify(sysfs_private_kobj, NULL, "thermal_bm");
+ printk(KERN_ERR "%s,thermal_bm_flag=%d\n", __func__, thermal_bm_flag);
+
+ return n;
+}
+/* chenyb1 add thermal config for benchmark 20150612 begin*/
+
+extern unsigned long acpu_clk_get_rate(int cpu);
+extern int wakelock_dump_info(char* buf);
+static ssize_t pm_status_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ unsigned long rate; // khz
+ int cpu;
+
+ // show CPU clocks
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ s += sprintf(s, "APPS[%d]:", cpu);
+ if (cpu_online(cpu)) {
+#if 0
+ //acpuclk_get_rate doesn't work because acpuclk_data is no longer available in krait
+ rate = acpuclk_get_rate(cpu); // khz
+ s += sprintf(s, "(%3lu MHz); \n", rate / 1000);
+#else
+ //Call acpu_clk_get_rate added in clock-krait-8974.c
+ rate = acpu_clk_get_rate(cpu); // hz
+ s += sprintf(s, "(%3lu MHz); \n", rate / 1000000);
+#endif
+ } else {
+ s += sprintf(s, "sleep; \n");
+ }
+ }
+
+ s += wakelock_dump_info(s);
+
+ return (s - buf);
+}
+
+static ssize_t pm_status_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support yet.\n", __func__);
+
+ return -EPERM;
+}
+
+static unsigned pm_wakeup_fetched = true;
+static ssize_t pm_wakeup_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ if (!pm_wakeup_fetched) {
+ pm_wakeup_fetched = true;
+ s += sprintf(s, "true");
+ } else
+ s += sprintf(s, "false");
+
+ return (s - buf);
+}
+
+static ssize_t pm_wakeup_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: no support yet.\n", __func__);
+
+ return -EPERM;
+}
+
+// create a sys interface for power monitor
+#define PM_MONITOR_BUF_LEN 128
+static char pm_monitor_buf[PM_MONITOR_BUF_LEN] = {0};
+static ssize_t pm_monitor_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PM_MONITOR_BUF_LEN, "%s", pm_monitor_buf);
+}
+
+static ssize_t pm_monitor_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ if (n>(PM_MONITOR_BUF_LEN-1) || n<=0)
+ {
+ printk(KERN_ERR "%s: %d\n", __func__, (int)n);
+ return -EPERM;
+ }
+
+ snprintf(pm_monitor_buf, PM_MONITOR_BUF_LEN, "%s", buf);
+ pm_monitor_buf[n] = '\0';
+ printk(KERN_ERR "%s: %s,%d\n", __func__, pm_monitor_buf, (int)n);
+
+ return n;
+}
+
+#ifdef CONFIG_LENOVO_PM_LOG_TLMM//TBD
+private_attr(tlmm_sim);
+private_attr(tlmm_num);
+private_attr(tlmm);
+private_attr(tlmm_before_sleep_table);
+private_attr(tlmm_before_sleep);
+#endif
+private_attr(vreg_before_sleep);
+private_attr(vreg);
+private_attr(clk);
+private_attr(thermal_bm);
+private_attr(pm_status);
+private_attr(pm_wakeup);
+private_attr(pm_monitor);
+
+static struct attribute *g_private_attr[] = {
+#ifdef CONFIG_LENOVO_PM_LOG_TLMM//TBD
+ &tlmm_sim_attr.attr,
+ &tlmm_num_attr.attr,
+ &tlmm_attr.attr,
+ &tlmm_before_sleep_table_attr.attr,
+ &tlmm_before_sleep_attr.attr,
+#endif
+ &vreg_attr.attr,
+ &vreg_before_sleep_attr.attr,
+ &clk_attr.attr,
+ &thermal_bm_attr.attr,
+ &pm_status_attr.attr,
+ &pm_wakeup_attr.attr,
+ &pm_monitor_attr.attr,
+ NULL,
+};
+
+static struct attribute_group private_attr_group = {
+ .attrs = g_private_attr,
+};
+
+#define SLEEP_LOG
+#ifdef SLEEP_LOG
+#define WRITE_SLEEP_LOG
+#define MAX_WAKEUP_IRQ 8
+
+enum {
+ DEBUG_SLEEP_LOG = 1U << 0,
+ DEBUG_WRITE_LOG = 1U << 1,
+ DEBUG_WAKEUP_IRQ = 1U << 2,
+ DEBUG_RPM_SPM_LOG = 1U << 3,
+ DEBUG_RPM_CXO_LOG = 1U << 4,
+ DEBUG_ADSP_CXO_LOG = 1U << 5,
+ DEBUG_MODEM_CXO_LOG = 1U << 6,
+ DEBUG_WCNSS_CXO_LOG = 1U << 7,
+};
+static int debug_mask;// = DEBUG_WRITE_LOG;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+struct sleep_log_t {
+ char time[18];
+ long timesec;
+ unsigned int log;
+ uint32_t maoints[2];
+ int wakeup_irq[MAX_WAKEUP_IRQ];
+ int wakeup_gpio;
+//31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00
+//bit 1-0 = 00: try to sleep; bit 1-0 = 01: leave from sleep; bit 1-0 = 10: fail to sleep
+//bit31-bit24 : return value
+};
+
+struct rpm_smem_state_t {
+ uint32_t wakeup_ints[2];
+};
+struct rpm_smem_state_t rpm_smem_state_data;
+
+#define TRY_TO_SLEEP (0)
+#define LEAVE_FORM_SLEEP (1)
+#define FAIL_TO_SLEEP (2)
+
+#define SLEEP_LOG_LENGTH 80
+
+struct sleep_log_t sleep_log_array[SLEEP_LOG_LENGTH];
+int sleep_log_pointer = 0;
+int sleep_log_count = 0;
+int enter_times = 0;
+
+static int irq_wakeup_saved = MAX_WAKEUP_IRQ;
+static int irq_wakeup_irq[MAX_WAKEUP_IRQ];
+static int irq_wakeup_gpio;
+
+char sleep_log_name[60];
+struct file *sleep_log_file = NULL;
+
+#ifdef WRITE_SLEEP_LOG
+static int sleep_log_write(void)
+{
+ char buf[256];
+ char *p, *p0;
+ int i, j, pos;
+ mm_segment_t old_fs;
+ p = buf;
+ p0 = p;
+
+ if (sleep_log_file == NULL)
+ sleep_log_file = filp_open(sleep_log_name, O_RDWR | O_APPEND | O_CREAT,
+ 0644);
+ if (IS_ERR(sleep_log_file)) {
+ printk("error occured while opening file %s, exiting...\n",
+ sleep_log_name);
+ return 0;
+ }
+
+ if (sleep_log_count > 1) {
+ for (i = 0; i < 2; i++) {
+ if (sleep_log_pointer == 0)
+ pos = SLEEP_LOG_LENGTH - 2 + i;
+ else
+ pos = sleep_log_pointer - 2 + i;
+ switch (sleep_log_array[pos].log & 0xF) {
+ case TRY_TO_SLEEP:
+ p += sprintf(p, ">[%ld]%s\n", sleep_log_array[pos].timesec,
+ sleep_log_array[pos].time);
+ break;
+ case LEAVE_FORM_SLEEP:
+ p += sprintf(p, "<[%ld]%s(0x%x,0x%x,",
+ sleep_log_array[pos].timesec,
+ sleep_log_array[pos].time,
+ sleep_log_array[pos].maoints[0],
+ sleep_log_array[pos].maoints[1]);
+ for (j = 0; j < MAX_WAKEUP_IRQ && sleep_log_array[pos].wakeup_irq[j]; j++)
+ p += sprintf(p, " %d", sleep_log_array[pos].wakeup_irq[j]);
+
+ if (sleep_log_array[pos].wakeup_gpio)
+ p += sprintf(p, ", gpio %d", sleep_log_array[pos].wakeup_gpio);
+
+ p += sprintf(p, ")\n");
+ break;
+ case FAIL_TO_SLEEP:
+ p += sprintf(p, "^[%ld]%s(%d)\n", sleep_log_array[pos].timesec,
+ sleep_log_array[pos].time,
+ (char) (sleep_log_array[pos].log >> 24));
+ break;
+ }
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ vfs_write(sleep_log_file, p0, p - p0,
+ &sleep_log_file->f_pos);
+ set_fs(old_fs);
+
+ if (sleep_log_file != NULL) {
+ filp_close(sleep_log_file, NULL);
+ sleep_log_file = NULL;
+ }
+ return 0;
+}
+#else //WRITE_SLEEP_LOG
+static int sleep_log_write(void)
+{
+ return 0;
+}
+#endif //WRITE_SLEEP_LOG
+
+static int save_irq_wakeup_internal(int irq)
+{
+ int i;
+ int ret;
+
+ ret = 0;
+ if (irq_wakeup_saved < MAX_WAKEUP_IRQ) {
+ for (i = 0; i < irq_wakeup_saved; i++) {
+ if (irq == irq_wakeup_irq[i])
+ break;
+ }
+ if (i == irq_wakeup_saved)
+ ret = irq_wakeup_irq[irq_wakeup_saved++] = irq;
+ }
+ return ret;
+}
+
+int save_irq_wakeup_gpio(int irq, int gpio)
+{
+ struct irq_desc *desc;
+ int ret;
+
+ ret = 0;
+ if (debug_mask & DEBUG_WAKEUP_IRQ) {
+ desc = irq_to_desc(irq);
+ if (desc != NULL) {
+ //if (irqd_is_wakeup_set(&desc->irq_data)) {
+ ret = save_irq_wakeup_internal(irq);
+ if (ret) {
+ if (gpio != 0 && irq_wakeup_gpio == 0) {
+ irq_wakeup_gpio = gpio;
+ irq_wakeup_saved = MAX_WAKEUP_IRQ;
+ }
+#ifdef CONFIG_KALLSYMS
+ printk("%s(), irq=%d, gpio=%d, %s, handler=(%pS)\n", __func__, irq, gpio,
+ desc->action && desc->action->name ? desc->action->name : "",
+ desc->action ? (void *)desc->action->handler : 0);
+#else
+ printk("%s(), irq=%d, gpio=%d, %s, handler=0x%08x\n", __func__, irq, gpio,
+ desc->action && desc->action->name ? desc->action->name : "",
+ desc->action ? (unsigned int)desc->action->handler : 0);
+#endif
+ }
+// }//if (irqd_is_wakeup_set(&desc->irq_data)) {
+ }
+ }
+
+ return ret;
+}
+
+static void clear_irq_wakeup_saved(void)
+{
+ if (debug_mask & DEBUG_WAKEUP_IRQ) {
+ memset(irq_wakeup_irq, 0, sizeof(irq_wakeup_irq));
+ irq_wakeup_gpio = 0;
+ irq_wakeup_saved = 0;
+ }
+}
+
+static void set_irq_wakeup_saved(void)
+{
+ if (debug_mask & DEBUG_WAKEUP_IRQ)
+ irq_wakeup_saved = MAX_WAKEUP_IRQ;
+}
+
+void log_suspend_enter(void)
+{
+ extern void smem_set_reserved(int index, int data);
+ struct timespec ts_;
+ struct rtc_time tm_;
+
+ //Turn on/off the share memory flag to inform RPM to record spm logs
+ //smem_set_reserved(6, debug_mask & DEBUG_WAKEUP_IRQ ? 1 : 0);
+// smem_set_reserved(6, debug_mask);
+
+ if (debug_mask & DEBUG_SLEEP_LOG) {
+ printk("%s(), APPS try to ENTER sleep mode>>>\n", __func__);
+
+ getnstimeofday(&ts_);
+ rtc_time_to_tm(ts_.tv_sec + 8 * 3600, &tm_);
+
+ sprintf(sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].time,
+ "%d-%02d-%02d %02d:%02d:%02d", tm_.tm_year + 1900, tm_.tm_mon + 1,
+ tm_.tm_mday, tm_.tm_hour, tm_.tm_min, tm_.tm_sec);
+
+ if (strlen(sleep_log_name) < 1) {
+ sprintf(sleep_log_name,
+ "/data/local/log/aplog/sleeplog%d%02d%02d_%02d%02d%02d.txt",
+ tm_.tm_year + 1900, tm_.tm_mon + 1, tm_.tm_mday, tm_.tm_hour,
+ tm_.tm_min, tm_.tm_sec);
+ printk("%s(), sleep_log_name = %s \n", __func__, sleep_log_name);
+ }
+
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].timesec = ts_.tv_sec;
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].log = TRY_TO_SLEEP;
+ sleep_log_pointer++;
+ sleep_log_count++;
+ if (sleep_log_pointer == SLEEP_LOG_LENGTH)
+ sleep_log_pointer = 0;
+ }
+
+ clear_irq_wakeup_saved();
+ pm_wakeup_fetched = false;
+}
+
+void log_suspend_exit(int error)
+{
+#if 0
+ extern int smem_get_reserved(int index);
+#else
+ extern void msm_rpmstats_get_reverved(u32 reserved[][4]);
+ u32 reserved[4][4];
+#endif
+ struct timespec ts_;
+ struct rtc_time tm_;
+ uint32_t smem_value;
+ int i;
+
+ if (debug_mask & DEBUG_SLEEP_LOG) {
+ getnstimeofday(&ts_);
+ rtc_time_to_tm(ts_.tv_sec + 8 * 3600, &tm_);
+ sprintf(sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].time,
+ "%d-%02d-%02d %02d:%02d:%02d", tm_.tm_year + 1900, tm_.tm_mon + 1,
+ tm_.tm_mday, tm_.tm_hour, tm_.tm_min, tm_.tm_sec);
+
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].timesec = ts_.tv_sec;
+
+ if (error == 0) {
+#if 0
+ rpm_smem_state_data.wakeup_ints[0] = smem_get_reserved(0);
+ rpm_smem_state_data.wakeup_ints[1] = smem_get_reserved(1);
+#elif 0
+ if (debug_mask & DEBUG_RPM_SPM_LOG) {
+ for(i = 0; i <= 5; i++) {
+ smem_value = ((uint32_t)smem_get_reserved(i)) & 0xffff;
+ if(smem_value > 0)
+ printk("rpm: %s[%d] = %d\n", "spm_active" , i, smem_value);
+ }
+ }
+ if (debug_mask & DEBUG_RPM_CXO_LOG) {
+ for(i = 0; i <= 5; i++) {
+ smem_value = ((uint32_t)smem_get_reserved(i)) >> 16;
+ if(smem_value > 0)
+ printk("rpm: %s[%d] = %d\n", "cxo_voter" , i, smem_value);
+ }
+ }
+#else
+ printk("%s, debug_mask=%x\n", __func__, debug_mask);
+ if (debug_mask & (DEBUG_RPM_SPM_LOG | DEBUG_RPM_CXO_LOG)) {
+ memset(reserved, 0, sizeof(reserved));
+ msm_rpmstats_get_reverved(reserved);
+#if 1
+ for(i = 0; i < 3; i++)
+ printk("reserved[0][%d]=0x%08x\n", i, reserved[0][i]);
+ for(i = 0; i < 3; i++)
+ printk("reserved[1][%d]=0x%08x\n", i, reserved[1][i]);
+#endif
+ }
+
+ if (debug_mask & DEBUG_RPM_SPM_LOG) {
+ for(i = 0; i <= 5; i++) {
+ smem_value = (reserved[1][i/2] >> (16 * (i % 2))) & 0xffff;
+ if(smem_value > 0)
+ printk("rpm: %s[%d] = %d\n", "spm_active" , i, smem_value);
+ }
+ }
+ if (debug_mask & DEBUG_RPM_CXO_LOG) {
+ for(i = 0; i <= 5; i++) {
+ smem_value = (reserved[0][i/2] >> (16 * (i % 2))) & 0xffff;
+ if(smem_value > 0)
+ printk("rpm: %s[%d] = %d\n", "cxo_voter" , i, smem_value);
+ }
+ }
+#endif
+
+ printk("%s(), APPS Exit from sleep<<<: wakeup ints=0x%x, 0x%x\n", __func__ ,
+ rpm_smem_state_data.wakeup_ints[0],
+ rpm_smem_state_data.wakeup_ints[1]);
+
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].log =
+ LEAVE_FORM_SLEEP;
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].maoints[0] =
+ rpm_smem_state_data.wakeup_ints[0];
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].maoints[1] =
+ rpm_smem_state_data.wakeup_ints[1];
+ for (i = 0; i < (irq_wakeup_gpio == 0 ? irq_wakeup_saved : 1); i++)
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].wakeup_irq[i] =
+ irq_wakeup_irq[i];
+ for (; i < MAX_WAKEUP_IRQ; i++)
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].wakeup_irq[i] = 0;
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].wakeup_gpio =
+ irq_wakeup_gpio;
+ } else {
+ printk("%s(), APPS FAIL to enter sleep^^^\n", __func__);
+
+ sleep_log_array[sleep_log_pointer % SLEEP_LOG_LENGTH].log =
+ FAIL_TO_SLEEP | (error << 24);
+ }
+
+ sleep_log_pointer++;
+ sleep_log_count++;
+
+ if (sleep_log_pointer == SLEEP_LOG_LENGTH)
+ sleep_log_pointer = 0;
+
+ if (debug_mask & DEBUG_WRITE_LOG) {
+ enter_times++;
+ if (enter_times < 5000)
+ sleep_log_write();
+ }
+ }
+
+ set_irq_wakeup_saved();
+}
+#else //SLEEP_LOG
+void log_suspend_enter(void)
+{
+ clear_irq_wakeup_saved();
+ pm_wakeup_fetched = false;
+}
+
+void log_suspend_exit(int error)
+{
+ set_irq_wakeup_saved();
+}
+#endif //SLEEP_LOG
+
+static const struct of_device_id sysfs_private_tlmm_dt_match[] = {
+ { .compatible = "qcom,msmtitanium-pinctrl", },
+ { },
+};
+
+static int sysfs_private_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ int size = 0;
+ int ret = 0;
+ struct regulator *vdd_l16;
+ u32 voltage_low, voltage_high;
+ //int i = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot find IO resource\n");
+ return -ENOENT;
+ }
+/*
+* Requesting the same resource again could cause ioremap to fail and
+* return -EBUSY. Therefore we remove it.
+*/
+/*
+ tlmm_reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tlmm_reg_base))
+ return PTR_ERR(tlmm_reg_base);
+*/
+ of_get_property(node, "lenovo,tz_gpio", &size);
+ //printk("%s(), %d, size=%d\n", __func__, __LINE__, size);
+ if (size) {
+ tz_pin_num = size / sizeof(u32);
+ tz_config = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ ret = of_property_read_u32_array(node,
+ "lenovo,tz_gpio", tz_config,
+ tz_pin_num);
+/*
+ for(i = 0; i < tz_pin_num; i++)
+ {
+ pr_debug("%s(), %d, tz_config[%d]=%d\n", __func__, __LINE__, i, tz_config[i]);
+ }
+*/
+ }
+ printk("%s(), %d, TLMM_BASE=%lx\n", __func__, __LINE__, (unsigned long int)(void *)tlmm_reg_base);
+
+ // enable l16
+ if (of_find_property(node, "vdd-supply", NULL)) {
+ vdd_l16 = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(vdd_l16))
+ return PTR_ERR(vdd_l16);
+ }
+
+ ret = of_property_read_u32(node, "vdd-low-microvolt",
+ &voltage_low);
+ if (ret) {
+ dev_err(&pdev->dev, "no vdd-low-microvolt property set\n");
+ return ret;
+ }
+ ret = of_property_read_u32(node, "vdd-low-microvolt",
+ &voltage_high);
+ if (ret) {
+ dev_err(&pdev->dev, "no vdd-low-microvolt property set\n");
+ return ret;
+ }
+ if (regulator_count_voltages(vdd_l16) <= 0)
+ return 0;
+
+ printk("%s(), %d, voltage_low=%u, voltage_high=%u\n", __func__, __LINE__, voltage_low, voltage_high);
+
+ regulator_set_voltage(vdd_l16, voltage_low, voltage_high);
+
+ ret = regulator_enable(vdd_l16);
+ pr_err("enable l16 regulator rc=%d\n", ret);
+ if (ret) {
+ pr_err("failed to enable l16 regulator rc=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id msmtitanium_pinctrl_of_match[] = {
+ { .compatible = "qcom,sysfs_private", },
+ { },
+};
+
+static struct platform_driver sysfs_private_drv = {
+ .probe = sysfs_private_probe,
+ .driver = {
+ .name = "sysfs_private",
+ .owner = THIS_MODULE,
+ .of_match_table = msmtitanium_pinctrl_of_match,
+ },
+};
+
+
+MODULE_DEVICE_TABLE(of, msm_tlmm_dt_match);
+
+
+
+static int __init sysfs_private_init(void)
+{
+ int result;
+
+ printk("%s(), %d\n", __func__, __LINE__);
+
+ sysfs_private_kobj = kobject_create_and_add("private", NULL);
+ if (!sysfs_private_kobj)
+ return -ENOMEM;
+
+ result = sysfs_create_group(sysfs_private_kobj, &private_attr_group);
+ printk("%s(), %d, result=%d\n", __func__, __LINE__, result);
+
+#ifdef SLEEP_LOG
+	strcpy(sleep_log_name, "");
+ sleep_log_pointer = 0;
+ sleep_log_count = 0;
+ enter_times = 0;
+#endif
+
+	return platform_driver_register(&sysfs_private_drv);
+}
+
+static void __exit sysfs_private_exit(void)
+{
+ printk("%s(), %d\n", __func__, __LINE__);
+ sysfs_remove_group(sysfs_private_kobj, &private_attr_group);
+
+ kobject_put(sysfs_private_kobj);
+
+ platform_driver_unregister(&sysfs_private_drv);
+}
+
+module_init(sysfs_private_init);
+module_exit(sysfs_private_exit);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 0f4c0848..239e1a24 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -55,6 +55,10 @@
#include "console_cmdline.h"
#include "braille.h"
+//lenovo sw, yexh1, add lastkmsg feature
+#include <asm/le_rkm.h>
+//lenovo sw, yexh1, end
+
#ifdef CONFIG_EARLY_PRINTK_DIRECT
extern void printascii(char *);
#endif
@@ -912,8 +916,16 @@ void __init setup_log_buf(int early)
if (!early && !new_log_buf_len)
log_buf_add_cpu();
+//lenovo sw, yexh1, add lastkmsg feature
if (!new_log_buf_len)
+ {
+#ifdef CONFIG_LENOVO_DEBUG_RKM
+		rkm_init_log_buf_header(__log_buf, log_buf_len);
+#endif
return;
+ }
+//lenovo sw, yexh1, end
+
if (early) {
new_log_buf =
@@ -1272,6 +1284,62 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
return len;
}
+//lenovo sw, yexh1, add lastkmsg feature
+#ifdef CONFIG_LENOVO_DEBUG_RKM
+int kernel_log_buf_text_parser(char *kernel_log_buf, char *text_buf, int size)
+{
+ char *parser_text_buf;
+ char *buf = text_buf;
+ int total_size = size;
+ struct printk_log *msg;
+ int len = 0;
+ int log_idx = 0;
+ enum log_flags log_prev = LOG_NOCONS;
+
+	if (!kernel_log_buf || !text_buf)
+		return -EINVAL;
+
+ parser_text_buf = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+ if (!parser_text_buf)
+ return -ENOMEM;
+
+ while (size > 0) {
+ size_t n;
+
+ msg = (struct printk_log *)(kernel_log_buf + log_idx);
+		/*
+		 * A length == 0 record marks the end of the log buffer copy;
+		 * stop parsing here instead of wrapping around.
+		 */
+		if (!msg->len)
+			break;
+
+ n = msg_print_text(msg, log_prev, false, parser_text_buf,
+ LOG_LINE_MAX + PREFIX_MAX);
+
+ if ((len+n) >= total_size)
+ break;
+
+ log_prev = msg->flags;
+
+ log_idx = log_idx + msg->len;
+
+ memcpy(buf, parser_text_buf, n);
+
+ len += n;
+ size -= n;
+ buf += n;
+ }
+
+ kfree(parser_text_buf);
+ return len;
+}
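+
+/*
+ * Illustrative call (hypothetical caller; the RKM last-kmsg code is expected
+ * to hand in a raw copy of the kernel log buffer plus a destination buffer):
+ *
+ *	len = kernel_log_buf_text_parser(raw_log_copy, text, text_size);
+ *	if (len > 0)
+ *		write_to_last_kmsg(text, len);	(hypothetical consumer)
+ */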
+#endif
+//lenovo sw, yexh1, end
+
+
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
bool clear = false;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 45679c74..15cb264f 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 86bac834..96d3e274 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6158,7 +6158,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
- BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+ if (unlikely(task_stack_end_corrupted(prev)))
+ panic("corrupted stack end detected inside scheduler\n");
#endif
/*
* Test if we are atomic. Since do_exit() needs to call into
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
new file mode 100644
index 00000000..0897b8d7
--- /dev/null
+++ b/kernel/sched/core_ctl.c
@@ -0,0 +1,1115 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/mutex.h>
+
+#include <trace/events/sched.h>
+
+#define MAX_CPUS_PER_GROUP 4
+
+struct cpu_data {
+ /* Per CPU data. */
+ bool inited;
+ bool online;
+ bool rejected;
+ bool is_busy;
+ bool not_preferred;
+ unsigned int busy;
+ unsigned int cpu;
+ struct list_head sib;
+ unsigned int first_cpu;
+ struct list_head pending_sib;
+
+ /* Per cluster data set only on first CPU */
+ unsigned int min_cpus;
+ unsigned int max_cpus;
+ unsigned int offline_delay_ms;
+ unsigned int busy_up_thres[MAX_CPUS_PER_GROUP];
+ unsigned int busy_down_thres[MAX_CPUS_PER_GROUP];
+ unsigned int online_cpus;
+ unsigned int avail_cpus;
+ unsigned int num_cpus;
+ unsigned int need_cpus;
+ unsigned int task_thres;
+ s64 need_ts;
+ struct list_head lru;
+ bool pending;
+ spinlock_t pending_lock;
+ bool is_big_cluster;
+ int nrrun;
+ bool nrrun_changed;
+ struct timer_list timer;
+ struct task_struct *hotplug_thread;
+ struct kobject kobj;
+ struct list_head pending_lru;
+ bool disabled;
+};
+
+static DEFINE_PER_CPU(struct cpu_data, cpu_state);
+static DEFINE_SPINLOCK(state_lock);
+static DEFINE_SPINLOCK(pending_lru_lock);
+static DEFINE_MUTEX(lru_lock);
+
+static void apply_need(struct cpu_data *f);
+static void wake_up_hotplug_thread(struct cpu_data *state);
+static void add_to_pending_lru(struct cpu_data *state);
+static void update_lru(struct cpu_data *state);
+
+/* ========================= sysfs interface =========================== */
+
+static ssize_t store_min_cpus(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ state->min_cpus = min(val, state->max_cpus);
+ wake_up_hotplug_thread(state);
+
+ return count;
+}
+
+static ssize_t show_min_cpus(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
+}
+
+static ssize_t store_max_cpus(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ val = min(val, state->num_cpus);
+ state->max_cpus = val;
+ state->min_cpus = min(state->min_cpus, state->max_cpus);
+ wake_up_hotplug_thread(state);
+
+ return count;
+}
+
+static ssize_t show_max_cpus(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
+}
+
+static ssize_t store_offline_delay_ms(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ state->offline_delay_ms = val;
+ apply_need(state);
+
+ return count;
+}
+
+static ssize_t show_task_thres(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
+}
+
+static ssize_t store_task_thres(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val < state->num_cpus)
+ return -EINVAL;
+
+ state->task_thres = val;
+ apply_need(state);
+
+ return count;
+}
+
+static ssize_t show_offline_delay_ms(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
+}
+
+static ssize_t store_busy_up_thres(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val[MAX_CPUS_PER_GROUP];
+ int ret, i;
+
+ ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ if (ret != 1 && ret != state->num_cpus)
+ return -EINVAL;
+
+ if (ret == 1) {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_up_thres[i] = val[0];
+ } else {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_up_thres[i] = val[i];
+ }
+ apply_need(state);
+ return count;
+}
+
+static ssize_t show_busy_up_thres(struct cpu_data *state, char *buf)
+{
+ int i, count = 0;
+
+ for (i = 0; i < state->num_cpus; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
+ state->busy_up_thres[i]);
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ return count;
+}
+
+static ssize_t store_busy_down_thres(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val[MAX_CPUS_PER_GROUP];
+ int ret, i;
+
+ ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ if (ret != 1 && ret != state->num_cpus)
+ return -EINVAL;
+
+ if (ret == 1) {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_down_thres[i] = val[0];
+ } else {
+ for (i = 0; i < state->num_cpus; i++)
+ state->busy_down_thres[i] = val[i];
+ }
+ apply_need(state);
+ return count;
+}
+
+static ssize_t show_busy_down_thres(struct cpu_data *state, char *buf)
+{
+ int i, count = 0;
+
+ for (i = 0; i < state->num_cpus; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
+ state->busy_down_thres[i]);
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ return count;
+}
+
+static ssize_t store_is_big_cluster(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ state->is_big_cluster = val ? 1 : 0;
+ return count;
+}
+
+static ssize_t show_is_big_cluster(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
+}
+
+static ssize_t show_cpus(struct cpu_data *state, char *buf)
+{
+ struct cpu_data *c;
+ ssize_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ list_for_each_entry(c, &state->lru, sib) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "CPU%u (%s)\n", c->cpu,
+ c->online ? "Online" : "Offline");
+ }
+ spin_unlock_irqrestore(&state_lock, flags);
+ return count;
+}
+
+static ssize_t show_need_cpus(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
+}
+
+static ssize_t show_online_cpus(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->online_cpus);
+}
+
+static ssize_t show_global_state(struct cpu_data *state, char *buf)
+{
+ struct cpu_data *c;
+ ssize_t count = 0;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "CPU%u\n", cpu);
+ c = &per_cpu(cpu_state, cpu);
+ if (!c->inited)
+ continue;
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tCPU: %u\n", c->cpu);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tOnline: %u\n", c->online);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tRejected: %u\n", c->rejected);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tFirst CPU: %u\n", c->first_cpu);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tBusy%%: %u\n", c->busy);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tIs busy: %u\n", c->is_busy);
+ if (c->cpu != c->first_cpu)
+ continue;
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNr running: %u\n", c->nrrun);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tAvail CPUs: %u\n", c->avail_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNeed CPUs: %u\n", c->need_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tStatus: %s\n",
+ c->disabled ? "disabled" : "enabled");
+ }
+
+ return count;
+}
+
+static ssize_t store_not_preferred(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ struct cpu_data *c;
+ unsigned int i, first_cpu;
+ unsigned int val[MAX_CPUS_PER_GROUP];
+ int ret;
+
+	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+	if (ret != 1 && ret != state->num_cpus)
+		return -EINVAL;
+
+	first_cpu = state->first_cpu;
+
+	/* A single value applies to every CPU, mirroring the thres stores. */
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, first_cpu);
+		c->not_preferred = (ret == 1) ? val[0] : val[i];
+		first_cpu++;
+	}
+
+ return count;
+}
+
+static ssize_t show_not_preferred(struct cpu_data *state, char *buf)
+{
+ struct cpu_data *c;
+ ssize_t count = 0;
+ unsigned int i, first_cpu;
+
+ first_cpu = state->first_cpu;
+
+ for (i = 0; i < state->num_cpus; i++) {
+ c = &per_cpu(cpu_state, first_cpu);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tCPU:%d %u\n", first_cpu, c->not_preferred);
+ first_cpu++;
+ }
+
+ return count;
+}
+
+static ssize_t store_disable(struct cpu_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ val = !!val;
+
+ if (state->disabled == val)
+ return count;
+
+ state->disabled = val;
+
+ if (!state->disabled)
+ wake_up_hotplug_thread(state);
+
+
+ return count;
+}
+
+static ssize_t show_disable(struct cpu_data *state, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", state->disabled);
+}
+
+struct core_ctl_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpu_data *, char *);
+ ssize_t (*store)(struct cpu_data *, const char *, size_t count);
+};
+
+#define core_ctl_attr_ro(_name) \
+static struct core_ctl_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define core_ctl_attr_rw(_name) \
+static struct core_ctl_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+core_ctl_attr_rw(min_cpus);
+core_ctl_attr_rw(max_cpus);
+core_ctl_attr_rw(offline_delay_ms);
+core_ctl_attr_rw(busy_up_thres);
+core_ctl_attr_rw(busy_down_thres);
+core_ctl_attr_rw(task_thres);
+core_ctl_attr_rw(is_big_cluster);
+core_ctl_attr_ro(cpus);
+core_ctl_attr_ro(need_cpus);
+core_ctl_attr_ro(online_cpus);
+core_ctl_attr_ro(global_state);
+core_ctl_attr_rw(not_preferred);
+core_ctl_attr_rw(disable);
+
+static struct attribute *default_attrs[] = {
+ &min_cpus.attr,
+ &max_cpus.attr,
+ &offline_delay_ms.attr,
+ &busy_up_thres.attr,
+ &busy_down_thres.attr,
+ &task_thres.attr,
+ &is_big_cluster.attr,
+ &cpus.attr,
+ &need_cpus.attr,
+ &online_cpus.attr,
+ &global_state.attr,
+ &not_preferred.attr,
+ &disable.attr,
+ NULL
+};
+
+#define to_cpu_data(k) container_of(k, struct cpu_data, kobj)
+#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct cpu_data *data = to_cpu_data(kobj);
+ struct core_ctl_attr *cattr = to_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (cattr->show)
+ ret = cattr->show(data, buf);
+
+ return ret;
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu_data *data = to_cpu_data(kobj);
+ struct core_ctl_attr *cattr = to_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (cattr->store)
+ ret = cattr->store(data, buf, count);
+
+ return ret;
+}
+
+static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+};
+
+static struct kobj_type ktype_core_ctl = {
+ .sysfs_ops = &sysfs_ops,
+ .default_attrs = default_attrs,
+};
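+
+/*
+ * The attribute group is added under the first CPU of each cluster in
+ * group_init(), so the files typically show up as below (illustrative paths
+ * and values; cpu0/cpu4 assume a 4+4 configuration):
+ *
+ *	cat /sys/devices/system/cpu/cpu0/core_ctl/global_state
+ *	echo 2 > /sys/devices/system/cpu/cpu4/core_ctl/min_cpus
+ *	echo "60 60 60 60" > /sys/devices/system/cpu/cpu4/core_ctl/busy_up_thres
+ *
+ * busy_up_thres/busy_down_thres accept either one value per CPU in the
+ * cluster or a single value that is applied to every entry.
+ */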
+
+/* ==================== runqueue based core count =================== */
+
+#define RQ_AVG_TOLERANCE 2
+#define RQ_AVG_DEFAULT_MS 20
+#define NR_RUNNING_TOLERANCE 5
+static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;
+
+static s64 rq_avg_timestamp_ms;
+static struct timer_list rq_avg_timer;
+
+static void update_running_avg(bool trigger_update)
+{
+ int cpu;
+ struct cpu_data *pcpu;
+ int avg, iowait_avg, big_avg, old_nrrun;
+ s64 now;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ now = ktime_to_ms(ktime_get());
+ if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return;
+ }
+ rq_avg_timestamp_ms = now;
+ sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ /*
+ * Round up to the next integer if the average nr running tasks
+ * is within NR_RUNNING_TOLERANCE/100 of the next integer.
+ * If normal rounding up is used, it will allow a transient task
+ * to trigger online event. By the time core is onlined, the task
+ * has finished.
+ * Rounding to closest suffers same problem because scheduler
+ * might only provide running stats per jiffy, and a transient
+ * task could skew the number for one jiffy. If core control
+ * samples every 2 jiffies, it will observe 0.5 additional running
+ * average which rounds up to 1 task.
+ */
+ avg = (avg + NR_RUNNING_TOLERANCE) / 100;
+ big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
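+	/*
+	 * Worked example (assuming the scheduler reports the averages scaled
+	 * by 100, as the division above implies): avg = 195 (1.95 tasks)
+	 * becomes (195 + 5) / 100 = 2, while avg = 194 stays at 1.
+	 */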
+
+ for_each_possible_cpu(cpu) {
+ pcpu = &per_cpu(cpu_state, cpu);
+ if (!pcpu->inited || pcpu->first_cpu != cpu)
+ continue;
+ old_nrrun = pcpu->nrrun;
+ /*
+ * Big cluster only need to take care of big tasks, but if
+ * there are not enough big cores, big tasks need to be run
+ * on little as well. Thus for little's runqueue stat, it
+ * has to use overall runqueue average, or derive what big
+ * tasks would have to be run on little. The latter approach
+ * is not easy to get given core control reacts much slower
+ * than scheduler, and can't predict scheduler's behavior.
+ */
+ pcpu->nrrun = pcpu->is_big_cluster ? big_avg : avg;
+ if (pcpu->nrrun != old_nrrun) {
+ if (trigger_update)
+ apply_need(pcpu);
+ else
+ pcpu->nrrun_changed = true;
+ }
+ }
+}
+
+/* adjust needed CPUs based on current runqueue information */
+static unsigned int apply_task_need(struct cpu_data *f, unsigned int new_need)
+{
+ /* Online all cores if there are enough tasks */
+ if (f->nrrun >= f->task_thres)
+ return f->num_cpus;
+
+ /* only online more cores if there are tasks to run */
+ if (f->nrrun > new_need)
+ return new_need + 1;
+
+ return new_need;
+}
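+
+/*
+ * apply_task_need() examples (illustrative, num_cpus = 4, task_thres = 6):
+ * nrrun = 7 onlines the whole group (4); nrrun = 3 with a busy-based need of
+ * 2 bumps the need to 3; nrrun = 2 with a need of 2 leaves it at 2.
+ */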
+
+static u64 round_to_nw_start(void)
+{
+ unsigned long step = msecs_to_jiffies(rq_avg_period_ms);
+ u64 jif = get_jiffies_64();
+
+ do_div(jif, step);
+ return (jif + 1) * step;
+}
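+
+/*
+ * round_to_nw_start() example (illustrative, HZ = 100): rq_avg_period_ms = 20
+ * gives step = 2 jiffies. With get_jiffies_64() = 1001, do_div() leaves
+ * jif = 500, so the timer is aligned to (500 + 1) * 2 = 1002, i.e. the start
+ * of the next sampling window.
+ */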
+
+static void rq_avg_timer_func(unsigned long not_used)
+{
+ update_running_avg(true);
+ mod_timer(&rq_avg_timer, round_to_nw_start());
+}
+
+/* ======================= load based core count ====================== */
+
+static unsigned int apply_limits(struct cpu_data *f, unsigned int need_cpus)
+{
+ return min(max(f->min_cpus, need_cpus), f->max_cpus);
+}
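+
+/*
+ * apply_limits() simply clamps the need: with min_cpus = 1 and max_cpus = 3,
+ * a computed need of 0 becomes 1 and a need of 5 becomes 3 (illustrative).
+ */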
+
+static bool eval_need(struct cpu_data *f)
+{
+ unsigned long flags;
+ struct cpu_data *c;
+ unsigned int need_cpus = 0, last_need, thres_idx;
+ int ret = 0;
+ bool need_flag = false;
+ s64 now;
+
+ if (unlikely(!f->inited))
+ return 0;
+
+ spin_lock_irqsave(&state_lock, flags);
+ thres_idx = f->online_cpus ? f->online_cpus - 1 : 0;
+ list_for_each_entry(c, &f->lru, sib) {
+ if (c->busy >= f->busy_up_thres[thres_idx])
+ c->is_busy = true;
+ else if (c->busy < f->busy_down_thres[thres_idx])
+ c->is_busy = false;
+ need_cpus += c->is_busy;
+ }
+ need_cpus = apply_task_need(f, need_cpus);
+ need_flag = apply_limits(f, need_cpus) != apply_limits(f, f->need_cpus);
+ last_need = f->need_cpus;
+
+ now = ktime_to_ms(ktime_get());
+
+ if (need_cpus == last_need) {
+ f->need_ts = now;
+ spin_unlock_irqrestore(&state_lock, flags);
+ return 0;
+ }
+
+ if (need_cpus > last_need) {
+ ret = 1;
+ } else if (need_cpus < last_need) {
+ s64 elapsed = now - f->need_ts;
+
+ if (elapsed >= f->offline_delay_ms) {
+ ret = 1;
+ } else {
+ mod_timer(&f->timer, jiffies +
+ msecs_to_jiffies(f->offline_delay_ms));
+ }
+ }
+
+ if (ret) {
+ f->need_ts = now;
+ f->need_cpus = need_cpus;
+ }
+
+ trace_core_ctl_eval_need(f->cpu, last_need, need_cpus,
+ ret && need_flag);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret && need_flag;
+}
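+
+/*
+ * Illustrative hysteresis (offline_delay_ms = 100): if the need drops from 3
+ * to 2, eval_need() only arms the per-group timer; when the timer fires and
+ * the need has stayed lower for >= 100 ms, the drop is committed and the
+ * hotplug thread is woken. Increases in need are committed immediately.
+ */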
+
+static void apply_need(struct cpu_data *f)
+{
+ if (eval_need(f))
+ wake_up_hotplug_thread(f);
+}
+
+static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
+{
+ struct cpu_data *c = &per_cpu(cpu_state, cpu);
+ struct cpu_data *f;
+ unsigned int old_is_busy = c->is_busy;
+
+ if (!c->inited)
+ return 0;
+ f = &per_cpu(cpu_state, c->first_cpu);
+
+ update_running_avg(false);
+ if (c->busy == busy && !f->nrrun_changed)
+ return 0;
+ c->busy = busy;
+ f->nrrun_changed = false;
+
+ apply_need(f);
+ trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
+ return 0;
+}
+
+/* ========================= core count enforcement ==================== */
+
+/*
+ * If current thread is hotplug thread, don't attempt to wake up
+ * itself or other hotplug threads because it will deadlock. Instead,
+ * schedule a timer to fire in next timer tick and wake up the thread.
+ */
+static void wake_up_hotplug_thread(struct cpu_data *state)
+{
+ unsigned long flags;
+ int cpu;
+ struct cpu_data *pcpu;
+ bool no_wakeup = false;
+
+ if (unlikely(state->disabled))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ pcpu = &per_cpu(cpu_state, cpu);
+ if (cpu != pcpu->first_cpu)
+ continue;
+ if (pcpu->hotplug_thread == current) {
+ no_wakeup = true;
+ break;
+ }
+ }
+
+ spin_lock_irqsave(&state->pending_lock, flags);
+ state->pending = true;
+ spin_unlock_irqrestore(&state->pending_lock, flags);
+
+ if (no_wakeup) {
+ spin_lock_irqsave(&state_lock, flags);
+ mod_timer(&state->timer, jiffies);
+ spin_unlock_irqrestore(&state_lock, flags);
+ } else {
+ wake_up_process(state->hotplug_thread);
+ }
+}
+
+static void core_ctl_timer_func(unsigned long cpu)
+{
+ struct cpu_data *state = &per_cpu(cpu_state, cpu);
+ unsigned long flags;
+
+ if (eval_need(state) && !state->disabled) {
+ spin_lock_irqsave(&state->pending_lock, flags);
+ state->pending = true;
+ spin_unlock_irqrestore(&state->pending_lock, flags);
+ wake_up_process(state->hotplug_thread);
+ }
+
+}
+
+static int core_ctl_online_core(unsigned int cpu)
+{
+ int ret;
+ struct device *dev;
+
+ lock_device_hotplug();
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, cpu);
+ ret = -ENODEV;
+ } else {
+ ret = device_online(dev);
+ }
+ unlock_device_hotplug();
+ return ret;
+}
+
+static int core_ctl_offline_core(unsigned int cpu)
+{
+ int ret;
+ struct device *dev;
+
+ lock_device_hotplug();
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, cpu);
+ ret = -ENODEV;
+ } else {
+ ret = device_offline(dev);
+ }
+ unlock_device_hotplug();
+ return ret;
+}
+
+static void update_lru(struct cpu_data *f)
+{
+ struct cpu_data *c, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pending_lru_lock, flags);
+ spin_lock(&state_lock);
+
+ list_for_each_entry_safe(c, tmp, &f->pending_lru, pending_sib) {
+ list_del_init(&c->pending_sib);
+ list_del(&c->sib);
+ list_add_tail(&c->sib, &f->lru);
+ }
+
+ spin_unlock(&state_lock);
+ spin_unlock_irqrestore(&pending_lru_lock, flags);
+}
+
+static void __ref do_hotplug(struct cpu_data *f)
+{
+ unsigned int need;
+ struct cpu_data *c, *tmp;
+
+ need = apply_limits(f, f->need_cpus);
+ pr_debug("Trying to adjust group %u to %u\n", f->first_cpu, need);
+
+ mutex_lock(&lru_lock);
+ if (f->online_cpus > need) {
+ list_for_each_entry_safe(c, tmp, &f->lru, sib) {
+ if (!c->online)
+ continue;
+
+ if (f->online_cpus == need)
+ break;
+
+ /* Don't offline busy CPUs. */
+ if (c->is_busy)
+ continue;
+
+ pr_debug("Trying to Offline CPU%u\n", c->cpu);
+ if (core_ctl_offline_core(c->cpu))
+ pr_debug("Unable to Offline CPU%u\n", c->cpu);
+ }
+
+ /*
+ * If the number of online CPUs is within the limits, then
+ * don't force any busy CPUs offline.
+ */
+ if (f->online_cpus <= f->max_cpus)
+ goto done;
+
+ list_for_each_entry_safe(c, tmp, &f->lru, sib) {
+ if (!c->online)
+ continue;
+
+ if (f->online_cpus <= f->max_cpus)
+ break;
+
+ pr_debug("Trying to Offline CPU%u\n", c->cpu);
+ if (core_ctl_offline_core(c->cpu))
+ pr_debug("Unable to Offline CPU%u\n", c->cpu);
+ }
+ } else if (f->online_cpus < need) {
+ list_for_each_entry_safe(c, tmp, &f->lru, sib) {
+ if (c->online || c->rejected || c->not_preferred)
+ continue;
+ if (f->online_cpus == need)
+ break;
+
+ pr_debug("Trying to Online CPU%u\n", c->cpu);
+ if (core_ctl_online_core(c->cpu))
+ pr_debug("Unable to Online CPU%u\n", c->cpu);
+ }
+
+ if (f->online_cpus == need)
+ goto done;
+
+
+ list_for_each_entry_safe(c, tmp, &f->lru, sib) {
+ if (c->online || c->rejected || !c->not_preferred)
+ continue;
+ if (f->online_cpus == need)
+ break;
+
+ pr_debug("Trying to Online CPU%u\n", c->cpu);
+ if (core_ctl_online_core(c->cpu))
+ pr_debug("Unable to Online CPU%u\n", c->cpu);
+ }
+ }
+done:
+ mutex_unlock(&lru_lock);
+ update_lru(f);
+}
+
+static int __ref try_hotplug(void *data)
+{
+ struct cpu_data *f = data;
+ unsigned long flags;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&f->pending_lock, flags);
+ if (!f->pending) {
+ spin_unlock_irqrestore(&f->pending_lock, flags);
+ schedule();
+ if (kthread_should_stop())
+ break;
+ spin_lock_irqsave(&f->pending_lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ f->pending = false;
+ spin_unlock_irqrestore(&f->pending_lock, flags);
+
+ do_hotplug(f);
+ }
+
+ return 0;
+}
+
+static void add_to_pending_lru(struct cpu_data *state)
+{
+ unsigned long flags;
+ struct cpu_data *f = &per_cpu(cpu_state, state->first_cpu);
+
+ spin_lock_irqsave(&pending_lru_lock, flags);
+
+ if (!list_empty(&state->pending_sib))
+ list_del(&state->pending_sib);
+ list_add_tail(&state->pending_sib, &f->pending_lru);
+
+ spin_unlock_irqrestore(&pending_lru_lock, flags);
+}
+
+static int __ref cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ uint32_t cpu = (uintptr_t)hcpu;
+ struct cpu_data *state = &per_cpu(cpu_state, cpu);
+ struct cpu_data *f;
+ int ret = NOTIFY_OK;
+ unsigned long flags;
+
+ /* Don't affect suspend resume */
+ if (action & CPU_TASKS_FROZEN)
+ return NOTIFY_OK;
+
+ if (unlikely(!state->inited))
+ return NOTIFY_OK;
+
+ f = &per_cpu(cpu_state, state->first_cpu);
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+
+ /* If online state of CPU somehow got out of sync, fix it. */
+ if (state->online) {
+ f->online_cpus--;
+ state->online = false;
+ pr_warn("CPU%d offline when state is online\n", cpu);
+ }
+
+ if (state->rejected) {
+ state->rejected = false;
+ f->avail_cpus++;
+ }
+
+ /*
+ * If a CPU is in the process of coming up, mark it as online
+ * so that there's no race with hotplug thread bringing up more
+ * CPUs than necessary.
+ */
+ if (!f->disabled &&
+ apply_limits(f, f->need_cpus) <= f->online_cpus) {
+ pr_debug("Prevent CPU%d onlining\n", cpu);
+ ret = NOTIFY_BAD;
+ } else {
+ state->online = true;
+ f->online_cpus++;
+ }
+ break;
+
+ case CPU_ONLINE:
+ /*
+ * Moving to the end of the list should only happen in
+ * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
+ * infinite list traversal when thermal (or other entities)
+ * reject trying to online CPUs.
+ */
+ ret = mutex_trylock(&lru_lock);
+ if (ret) {
+ spin_lock_irqsave(&state_lock, flags);
+ list_del(&state->sib);
+ list_add_tail(&state->sib, &f->lru);
+ spin_unlock_irqrestore(&state_lock, flags);
+ mutex_unlock(&lru_lock);
+ } else {
+ /*
+ * lru_lock is held by our hotplug thread to
+ * prevent concurrent access of lru list. The updates
+ * are maintained in pending_lru list and lru is
+ * updated at the end of do_hotplug().
+ */
+ add_to_pending_lru(state);
+ }
+ break;
+
+ case CPU_DEAD:
+ /* Move a CPU to the end of the LRU when it goes offline. */
+ ret = mutex_trylock(&lru_lock);
+ if (ret) {
+ spin_lock_irqsave(&state_lock, flags);
+ list_del(&state->sib);
+ list_add_tail(&state->sib, &f->lru);
+ spin_unlock_irqrestore(&state_lock, flags);
+ mutex_unlock(&lru_lock);
+ } else {
+ add_to_pending_lru(state);
+ }
+ /* Fall through */
+
+ case CPU_UP_CANCELED:
+
+ /* If online state of CPU somehow got out of sync, fix it. */
+ if (!state->online) {
+ f->online_cpus++;
+ pr_warn("CPU%d online when state is offline\n", cpu);
+ }
+
+ if (!state->rejected && action == CPU_UP_CANCELED) {
+ state->rejected = true;
+ f->avail_cpus--;
+ }
+
+ state->online = false;
+ state->busy = 0;
+ f->online_cpus--;
+ break;
+ }
+
+ if (f->online_cpus < apply_limits(f, f->need_cpus)
+ && f->online_cpus < f->avail_cpus
+ && action == CPU_DEAD)
+ wake_up_hotplug_thread(f);
+
+ return ret;
+}
+
+static struct notifier_block __refdata cpu_notifier = {
+ .notifier_call = cpu_callback,
+};
+
+/* ============================ init code ============================== */
+
+static int group_init(struct cpumask *mask)
+{
+ struct device *dev;
+ unsigned int first_cpu = cpumask_first(mask);
+ struct cpu_data *f = &per_cpu(cpu_state, first_cpu);
+ struct cpu_data *state;
+ unsigned int cpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ if (likely(f->inited))
+ return 0;
+
+ dev = get_cpu_device(first_cpu);
+ if (!dev)
+ return -ENODEV;
+
+ pr_info("Creating CPU group %d\n", first_cpu);
+
+ f->num_cpus = cpumask_weight(mask);
+ if (f->num_cpus > MAX_CPUS_PER_GROUP) {
+ pr_err("HW configuration not supported\n");
+ return -EINVAL;
+ }
+ f->min_cpus = 1;
+ f->max_cpus = f->num_cpus;
+ f->need_cpus = f->num_cpus;
+ f->avail_cpus = f->num_cpus;
+ f->offline_delay_ms = 100;
+ f->task_thres = UINT_MAX;
+ f->nrrun = f->num_cpus;
+ INIT_LIST_HEAD(&f->lru);
+ INIT_LIST_HEAD(&f->pending_lru);
+ init_timer(&f->timer);
+ spin_lock_init(&f->pending_lock);
+ f->timer.function = core_ctl_timer_func;
+ f->timer.data = first_cpu;
+
+ for_each_cpu(cpu, mask) {
+ pr_info("Init CPU%u state\n", cpu);
+
+ state = &per_cpu(cpu_state, cpu);
+ state->cpu = cpu;
+ state->first_cpu = first_cpu;
+
+ if (cpu_online(cpu)) {
+ f->online_cpus++;
+ state->online = true;
+ }
+
+ list_add_tail(&state->sib, &f->lru);
+ INIT_LIST_HEAD(&state->pending_sib);
+ }
+
+ f->hotplug_thread = kthread_run(try_hotplug, (void *) f,
+ "core_ctl/%d", first_cpu);
+ if (IS_ERR(f->hotplug_thread))
+ return PTR_ERR(f->hotplug_thread);
+ sched_setscheduler_nocheck(f->hotplug_thread, SCHED_FIFO, &param);
+
+ for_each_cpu(cpu, mask) {
+ state = &per_cpu(cpu_state, cpu);
+ state->inited = true;
+ }
+
+ kobject_init(&f->kobj, &ktype_core_ctl);
+ return kobject_add(&f->kobj, &dev->kobj, "core_ctl");
+}
+
+static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+
+ switch (val) {
+ case CPUFREQ_CREATE_POLICY:
+ group_init(policy->related_cpus);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_pol_nb = {
+ .notifier_call = cpufreq_policy_cb,
+};
+
+static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_govinfo *info = data;
+
+ switch (val) {
+ case CPUFREQ_LOAD_CHANGE:
+ core_ctl_set_busy(info->cpu, info->load);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_gov_nb = {
+ .notifier_call = cpufreq_gov_cb,
+};
+
+static int __init core_ctl_init(void)
+{
+ struct cpufreq_policy *policy;
+ unsigned int cpu;
+
+ register_cpu_notifier(&cpu_notifier);
+ cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
+ cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);
+ init_timer_deferrable(&rq_avg_timer);
+ rq_avg_timer.function = rq_avg_timer_func;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (policy) {
+ group_init(policy->related_cpus);
+ cpufreq_cpu_put(policy);
+ }
+ }
+ put_online_cpus();
+ mod_timer(&rq_avg_timer, round_to_nw_start());
+ return 0;
+}
+
+late_initcall(core_ctl_init);
diff --git a/kernel/sched/qhmp_core.c b/kernel/sched/qhmp_core.c
index 9925058a..fe99dcdd 100644
--- a/kernel/sched/qhmp_core.c
+++ b/kernel/sched/qhmp_core.c
@@ -4526,7 +4526,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
- BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+ if (unlikely(task_stack_end_corrupted(prev)))
+ panic("corrupted stack end detected inside scheduler\n");
#endif
/*
* Test if we are atomic. Since do_exit() needs to call into
@@ -6595,7 +6596,8 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
if (cpumask_equal(&p->cpus_allowed, new_mask))
goto out;
- if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+ if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
}
@@ -6606,7 +6608,6 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
@@ -7155,6 +7156,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
case CPU_STARTING:
set_cpu_rq_start_time();
return NOTIFY_OK;
+ case CPU_ONLINE:
+ /*
+ * At this point a starting CPU has marked itself as online via
+ * set_cpu_online(). But it might not yet have marked itself
+ * as active, which is essential from here on.
+ *
+ * Thus, fall-through and help the starting CPU along.
+ */
case CPU_DOWN_FAILED:
set_cpu_active((long)hcpu, true);
return NOTIFY_OK;
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 47d256ff..326bbb8f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -153,6 +153,10 @@ void set_power_on_alarm(void)
if (rc)
goto disable_alarm;
+//lenovo sw yexh1, add for rtc setting log
+ pr_info("%s: %ld\n", __func__, alarm_secs);
+//lenovo sw yexh1, end
+
mutex_unlock(&power_on_alarm_lock);
return;
@@ -322,6 +326,10 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
int ret = HRTIMER_NORESTART;
int restart = ALARMTIMER_NORESTART;
+//lenovo sw, yexh1, add log for showing alarm wakeup
+ printk("%s\n", __func__);
+//lenovo sw, yexh1, end
+
spin_lock_irqsave(&base->lock, flags);
alarmtimer_dequeue(base, alarm);
spin_unlock_irqrestore(&base->lock, flags);
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 74760614..d89a8e98 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -14,5 +14,3 @@
#include <trace/events/power.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
-EXPORT_TRACEPOINT_SYMBOL(core_ctl_set_busy);
-EXPORT_TRACEPOINT_SYMBOL(core_ctl_eval_need);