Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |   3
-rw-r--r--  arch/arm/kernel/le_rkm.c          | 305
-rw-r--r--  arch/arm/kernel/perf_event.c      |  21
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  |   1
-rw-r--r--  arch/arm/kernel/perf_event_v7.c   |  10
5 files changed, 334 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e0371466..3f8d701f 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -72,6 +72,9 @@ obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_OF) += devtree.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+#lenovo sw, yexh1, add lastkmsg feature
+obj-$(CONFIG_LENOVO_DEBUG_RKM) += le_rkm.o
+#lenovo sw, yexh1, end
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/arch/arm/kernel/le_rkm.c b/arch/arm/kernel/le_rkm.c
new file mode 100644
index 00000000..82bd32d3
--- /dev/null
+++ b/arch/arm/kernel/le_rkm.c
@@ -0,0 +1,305 @@
+/*
+ * le_rkm.c
+ *
+ * Lenovo replay kernel message (RKM) implementation.
+ *
+ * With RKM, the Android offline log can back up the previous boot's
+ * system logs, such as the kernel dmesg and TZ logs; it can also access
+ * the current boot's early logs, such as the SBL, LK and even TZ logs.
+ *
+ * Author: KerryXi
+ * Date: Apr 2014
+ *
+ * Copyright Lenovo 2014
+ */
+
+//#define DEBUG 1
+#include <asm/le_rkm.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+static rkm_log_buf_header_table_t __log_buf_header_table __page_aligned_bss;
+
+static char rkm_lk_buf[MAX_RKM_LK_BUF_LEN*PAGE_SIZE];
+static unsigned long rkm_lk_buf_va; //va addr
+static unsigned long phys_lk_log_start __initdata = 0;
+static unsigned long phys_lk_log_size = 0;
+
+#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+static char rkm_kernel_buf[__LOG_BUF_LEN];
+//static char rkm_kernel_buf[MAX_RKM_LOG_BUF_LEN*PAGE_SIZE];
+static unsigned long rkm_kernel_buf_va; //va addr
+static unsigned long phys_kernel_log_start __initdata = 0;
+static unsigned long phys_kernel_log_size = 0;
+
+extern char __bss_start[], __bss_stop[];
+
+void rkm_init_log_buf_header(char *addr,int len)
+{
+ __log_buf_header_table.magic1 = RKM_LOG_BUF_HEADER_TABLE_MAGIC1;
+ __log_buf_header_table.magic2 = RKM_LOG_BUF_HEADER_TABLE_MAGIC2;
+ __log_buf_header_table.version = RKM_LOG_BUF_HEADER_TABLE_VERSION;
+ __log_buf_header_table.bss_start = (unsigned int)__pa(__bss_start);
+ __log_buf_header_table.bss_stop = (unsigned int)__pa(__bss_stop);
+ __log_buf_header_table.log_buf_pa = (unsigned int)__pa(addr);
+ __log_buf_header_table.log_buf_len = len;
+
+ pr_debug("rkm init log header:bss_start=0x%08x,bss_stop=0x%08x,log_pa=0x%08x,len=%d\n",
+ __log_buf_header_table.bss_start,
+ __log_buf_header_table.bss_stop,
+ __log_buf_header_table.log_buf_pa,
+ __log_buf_header_table.log_buf_len);
+ return;
+}
+
+void __init arm64_rkm_log_backup(void)
+{
+ unsigned long phys_log_end;
+
+ if (phys_lk_log_size)
+ {
+ phys_log_end = phys_lk_log_start+phys_lk_log_size;
+
+ if ((phys_log_end < (PHYSIC_MEM_BASE_ADDR + 0x1e00000)) || (phys_log_end > (PHYSIC_MEM_BASE_ADDR + 0x2000000))) {
+ pr_err("rkm:unvalid lk log pa,end phys: 0x%lx\n",phys_log_end);
+ phys_lk_log_start=phys_lk_log_size=0;
+ }
+
+ if (phys_lk_log_size &&
+ !memblock_is_region_memory(phys_lk_log_start, phys_lk_log_size )) {
+ pr_err("rkm: 0x%08lx+0x%08lx is not a memory region - disabling lk log\n",
+ phys_lk_log_start, phys_lk_log_size);
+ phys_lk_log_start=phys_lk_log_size=0;
+ }
+ if (phys_lk_log_size &&
+ memblock_is_region_reserved(phys_lk_log_start, phys_lk_log_size)) {
+ pr_err("rkm: 0x%08lx+0x%08lx overlaps in-use memory region - disabling lk log\n",
+ phys_lk_log_start, phys_lk_log_size);
+ phys_lk_log_start=phys_lk_log_size=0;
+ }
+ if (phys_lk_log_size) {
+ rkm_lk_buf_va = ( unsigned long)__va(phys_lk_log_start);
+ pr_debug("rkm:reserve lk log ok,va=0x%08lx\n",rkm_lk_buf_va);
+ }
+ }
+
+ if (phys_kernel_log_size)
+ {
+ phys_log_end = phys_kernel_log_start+phys_kernel_log_size;
+
+ if ((phys_log_end < (PHYSIC_MEM_BASE_ADDR + 0x1e00000)) || (phys_log_end > (PHYSIC_MEM_BASE_ADDR + 0x4000000))) {
+ pr_err("rkm:unvalid kernel log pa,end phys: 0x%lx\n",phys_log_end);
+ phys_kernel_log_start=phys_kernel_log_size=0;
+ }
+
+ if (phys_kernel_log_size &&
+ !memblock_is_region_memory(phys_kernel_log_start, phys_kernel_log_size )) {
+ pr_err("rkm: 0x%08lx+0x%08lx is not a memory region - disabling kernel log\n",
+ phys_kernel_log_start, phys_kernel_log_size);
+ phys_kernel_log_start=phys_kernel_log_size=0;
+ }
+ if (phys_kernel_log_size &&
+ memblock_is_region_reserved(phys_kernel_log_start, phys_kernel_log_size)) {
+ pr_err("rkm: 0x%08lx+0x%08lx overlaps in-use memory region - disabling kernel log\n",
+ phys_kernel_log_start, phys_kernel_log_size);
+ phys_kernel_log_start=phys_kernel_log_size=0;
+ }
+ if (phys_kernel_log_size) {
+ rkm_kernel_buf_va = ( unsigned long)__va(phys_kernel_log_start);
+ pr_debug("rkm:reserve kernel log ok,va=0x%08lx\n",rkm_kernel_buf_va);
+ }
+ }
+
+ //backup rkm message log
+ if ((phys_lk_log_size>0) && (phys_lk_log_size <= sizeof(rkm_lk_buf))) {
+ pr_info("rkm:backup lk log message: va=%p len=%ld\n",(void*)rkm_lk_buf_va,phys_lk_log_size);
+ memcpy(rkm_lk_buf,(void*)rkm_lk_buf_va,phys_lk_log_size);
+ }
+
+
+ if ((phys_kernel_log_size>0) && (phys_kernel_log_size <= sizeof(rkm_kernel_buf))) {
+ pr_info("rkm:backup kernel log message: va=%p len=%ld\n",(void*)rkm_kernel_buf_va,phys_kernel_log_size);
+ memcpy(rkm_kernel_buf,(void*)rkm_kernel_buf_va,phys_kernel_log_size);
+ }
+ else
+ pr_info("rkm:fail backup kernel log message: len=%ld\n",phys_kernel_log_size);
+ return;
+}
+
+static void __init early_init_dt_check_for_lk_log(unsigned long node)
+{
+ unsigned long start, size;
+ unsigned int len;
+ const __be32 *prop;
+
+ pr_debug("rkm:Looking for lk log properties... ");
+
+ prop = of_get_flat_dt_prop(node, "lk,lk_log_start", &len);
+ if (!prop)
+ return;
+ start = of_read_ulong(prop, len/4);
+
+ prop = of_get_flat_dt_prop(node, "lk,lk_log_size", &len);
+ if (!prop)
+ return;
+ size = of_read_ulong(prop, len/4);
+
+ pr_debug("rkm:lk_log_start=0x%lx lk_log_size=0x%lx\n", start, size);
+
+ if ((int)size <=4) {
+ pr_err("uncorrect lk log size %d\n",(int)size);
+ return;
+ }
+ phys_lk_log_start = start;
+ phys_lk_log_size = size;
+}
+
+static void __init early_init_dt_check_for_kernel_log(unsigned long node)
+{
+ unsigned long start, size;
+ unsigned int len;
+ const __be32 *prop;
+
+ pr_debug("rkm:Looking for kernel log properties... ");
+
+ prop = of_get_flat_dt_prop(node, "kernel,log_buf_start", &len);
+ if (!prop)
+ return;
+ start = of_read_ulong(prop, len/4);
+
+ prop = of_get_flat_dt_prop(node, "kernel,log_buf_size", &len);
+ if (!prop)
+ return;
+ size = of_read_ulong(prop, len/4);
+
+ pr_debug("rkm:log_buf_start=0x%lx log_buf_size=0x%lx\n", start, size);
+
+ if ((int)size <=4) {
+ pr_err("uncorrect kernel log size %d\n",(int)size);
+ return;
+ }
+ phys_kernel_log_start = start;
+ phys_kernel_log_size = size;
+}
+
+int __init early_init_dt_scan_boot_log(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ pr_debug("search \"rkm_log\", depth: %d, uname: %s\n", depth, uname);
+
+ if (depth != 1 ||
+ (strcmp(uname, "rkm_log") != 0 && strcmp(uname, "rkm_log@0") != 0))
+ return 0;
+
+ early_init_dt_check_for_lk_log(node);
+ early_init_dt_check_for_kernel_log(node);
+
+ return 1;
+}
+
+/* Define the lk_log debugfs read interface for the backed-up lk log */
+static ssize_t rkm_debug_lk_log_read(struct file *file,char __user *buf, size_t n, loff_t *ppos)
+{
+ if (*ppos == 0)
+ pr_debug("rkm:---lk_log read/wrte testing--- pos=%d\n",(int)*ppos);
+ if ((*ppos == 0) || (*ppos == 4096)) {
+ if (!copy_to_user(buf,rkm_lk_buf+*ppos,PAGE_SIZE)) {
+ if (*ppos == 0)
+ pr_debug("%s:check copy_to_user ok\n",__func__);
+ *ppos += PAGE_SIZE;
+ return PAGE_SIZE;
+ }
+ }
+ return 0;
+}
+
+static const struct file_operations rkm_debug_lk_log_fops = {
+ .owner = THIS_MODULE,
+ .read = rkm_debug_lk_log_read,
+};
+
+/* Define the kernel_log debugfs read interface for the backed-up kernel log */
+static ssize_t rkm_debug_kernel_log_read(struct file *file,char __user *buf, size_t n, loff_t *ppos)
+{
+ static int text_parsered = 0;
+ if(text_parsered == 0)
+ {
+ char *rkm_kernel_text_buf = NULL;
+ int ret = 0;
+
+ text_parsered = 1;
+
+ rkm_kernel_text_buf = kmalloc(phys_kernel_log_size, GFP_KERNEL);
+ if(rkm_kernel_text_buf == NULL)
+ {
+ printk(KERN_ERR "rkm: %s: can't alloc rkm_kernel_text_buf ! \n", __func__);
+ }
+ else
+ {
+ ret = kernel_log_buf_text_parser(rkm_kernel_buf, rkm_kernel_text_buf, phys_kernel_log_size);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "rkm: kernel_log_buf_text_parser FAILED! \n");
+ }
+ else
+ {
+ printk(KERN_ERR "rkm: kernel_log_buf_text_parser Suceess! \n");
+ memcpy(rkm_kernel_buf, rkm_kernel_text_buf, phys_kernel_log_size);
+ }
+ kfree(rkm_kernel_text_buf);
+ }
+ }
+
+ if (*ppos == 0)
+ pr_debug("rkm:---kernel_log read/wrte testing--- pos=%d\n",(int)*ppos);
+
+ if ( (*ppos < phys_kernel_log_size)
+ && ( (*ppos & (PAGE_SIZE-1)) == 0) ) {
+ if (!copy_to_user(buf,rkm_kernel_buf+*ppos,PAGE_SIZE)) {
+ if (*ppos == 0)
+ pr_debug("%s:check copy_to_user ok\n",__func__);
+ *ppos += PAGE_SIZE;
+ return PAGE_SIZE;
+ }
+ }
+ return 0;
+}
+
+static const struct file_operations rkm_debug_kernel_log_fops = {
+ .owner = THIS_MODULE,
+ .read = rkm_debug_kernel_log_read,
+};
+
+static int __init le_rkm_init_debugfs(void)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("le_rkm", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ file = debugfs_create_file("lk_mesg", 0400, dir, NULL,
+ &rkm_debug_lk_log_fops);
+ if (!file) {
+ pr_err("%s:init lk_mesg node fail\n",__func__);
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+ file = debugfs_create_file("last_kmsg", 0400, dir, NULL,
+ &rkm_debug_kernel_log_fops);
+ if (!file) {
+ pr_err("%s:init last_kmsg node fail\n",__func__);
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+ return 0;
+}
+/* Debugfs setup must be done later */
+module_init(le_rkm_init_debugfs);
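
For context only, and not part of this patch: a minimal sketch of how the hooks in le_rkm.c would typically be consumed during early boot. The device tree is expected to carry a root-level "rkm_log" node with "lk,lk_log_start"/"lk,lk_log_size" and "kernel,log_buf_start"/"kernel,log_buf_size" properties. The wrapper below is a hypothetical call site (its name and placement in the arch setup path are assumptions), and the prototypes are assumed to come from asm/le_rkm.h.

#include <linux/of_fdt.h>
#include <asm/le_rkm.h>

/* Hypothetical early-boot call site; illustrates the expected call order. */
static void __init rkm_early_setup(void)
{
	/* Record the lk/kernel log regions advertised by the "rkm_log" node. */
	of_scan_flat_dt(early_init_dt_scan_boot_log, NULL);

	/*
	 * Once memblock is initialised, validate the regions and copy the
	 * previous boot's logs into the static rkm_lk_buf / rkm_kernel_buf
	 * backup buffers.
	 */
	arm64_rkm_log_backup();

	/*
	 * rkm_init_log_buf_header() would then be called from the printk
	 * setup path with the address and length of the current log_buf so
	 * that the next boot (or the bootloader) can locate this boot's
	 * kernel messages.
	 */
}

The debugfs files themselves ("lk_mesg" and "last_kmsg" under /sys/kernel/debug/le_rkm/) are created much later, by le_rkm_init_debugfs() via module_init().
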
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 266cba46..b72a5f19 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -252,20 +252,29 @@ out:
}
static int
-validate_event(struct pmu_hw_events *hw_events,
- struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+ struct perf_event *event)
{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct arm_pmu *armpmu;
if (is_software_event(event))
return 1;
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != pmu)
+ return 0;
+
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
+ armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, event) >= 0;
}
@@ -283,15 +292,15 @@ validate_group(struct perf_event *event)
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;
- if (!validate_event(&fake_pmu, leader))
+ if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_pmu, sibling))
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
- if (!validate_event(&fake_pmu, event))
+ if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
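
To illustrate what the new pmu argument to validate_event() rejects, a hedged user-space sketch (not part of this patch): the group leader is opened on a different hardware PMU (an uncore PMU whose dynamic type is a placeholder here; a real program would read it from /sys/bus/event_source/devices/<pmu>/type), and a CPU-PMU event is then added to that group. With this change the sibling's perf_event_open() fails with EINVAL during event_init instead of creating a group the CPU PMU can never schedule.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	/* pid = -1, cpu = 0: count on CPU 0 for all tasks. */
	return syscall(__NR_perf_event_open, attr, -1, 0, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr uncore = { 0 }, cpu = { 0 };
	int leader, sibling;

	/* Placeholder type; a real program reads the uncore PMU's dynamic
	 * type from sysfs. */
	uncore.size = sizeof(uncore);
	uncore.type = 42;
	uncore.config = 0;

	cpu.size = sizeof(cpu);
	cpu.type = PERF_TYPE_HARDWARE;
	cpu.config = PERF_COUNT_HW_CPU_CYCLES;

	leader = perf_open(&uncore, -1);
	if (leader < 0)
		return 1;	/* no such uncore PMU on this system */

	sibling = perf_open(&cpu, leader);	/* group spans two HW PMUs */
	if (sibling < 0)
		printf("cross-PMU group rejected: %s\n", strerror(errno));
	return 0;
}
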
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index eb2c4d55..e7418fe4 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -230,6 +230,7 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
{.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
+ {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
{},
};
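
For reference, a condensed sketch of how the table above is consumed; this mirrors the existing probe logic in this file rather than adding anything new, omits error handling and per-cpu setup, and relies on the file's existing includes (linux/of.h, linux/platform_device.h, asm/pmu.h). With the added entry, an "arm,armv8-pmuv3" DT node resolves to armv8_pmuv3_pmu_init().

static int cpu_pmu_probe_sketch(struct platform_device *pdev,
				struct arm_pmu *pmu)
{
	const struct of_device_id *of_id;
	int (*init_fn)(struct arm_pmu *);

	of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node);
	if (!of_id)
		return -ENODEV;	/* no matching "compatible" string */

	/* .data carries the per-PMU init routine, e.g. armv8_pmuv3_pmu_init. */
	init_fn = of_id->data;
	return init_fn(pmu);
}
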
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 65e626da..e505f1eb 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1042,6 +1042,16 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
}
+static int armv8_pmuv3_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv8 Cortex-A53";
+ cpu_pmu->map_event = armv7_a7_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ return 0;
+}
+
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);