| author | doc HD <doc.divxm@gmail.com> | 2018-03-07 10:05:53 +0300 |
|---|---|---|
| committer | doc HD <doc.divxm@gmail.com> | 2018-03-07 10:05:53 +0300 |
| commit | 812962bfbf833dc214abda0dea8339d02bff3cd1 (patch) | |
| tree | 7384e312916e52a56610d321022d534251c0b0d4 | |
| parent | 9faca91a45aca9b575f4bbee4ce2ce1681f4ce8f (diff) | |
| parent | 0de87126892e03146cf68b204056c48c45aec7a6 (diff) | |
Merge branch 'android-msm-marlin-3.18-oreo-mr1' of https://android.googlesource.com/kernel/msm into o8.1
Change-Id: I91a8b8fc5803aacafb3eb5e6cf35cfa1213ed3e4
68 files changed, 1088 insertions, 383 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 08dc963bf89..bffbbdd26a3 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -783,6 +783,18 @@ config FORCE_MAX_ZONEORDER default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE) default "11" +config UNMAP_KERNEL_AT_EL0 + bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT + default y + help + Speculation attacks against some high-performance processors can + be used to bypass MMU permission checks and leak kernel data to + userspace. This can be defended against by unmapping the kernel + when running in userspace, mapping it back in on exception entry + via a trampoline page in the vector table. + + If unsure, say Y. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT @@ -852,7 +864,7 @@ config SETEND_EMULATION endif config ARM64_SW_TTBR0_PAN - bool "Emulate Priviledged Access Never using TTBR0_EL1 switching" + bool "Emulate Privileged Access Never using TTBR0_EL1 switching" help Enabling this option prevents the kernel from accessing user-space memory directly by pointing TTBR0_EL1 to a reserved diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index fa16d56c184..fe34ae0bed9 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -2,6 +2,7 @@ #define __ASM_ALTERNATIVE_H #include <asm/cpufeature.h> +#include <asm/insn.h> #ifndef __ASSEMBLY__ @@ -91,34 +92,55 @@ void free_alternatives_memory(void); .endm /* - * Begin an alternative code sequence. + * Alternative sequences + * + * The code for the case where the capability is not present will be + * assembled and linked as normal. There are no restrictions on this + * code. + * + * The code for the case where the capability is present will be + * assembled into a special section to be used for dynamic patching. + * Code for that case must: + * + * 1. Be exactly the same length (in bytes) as the default code + * sequence. * - * The code that follows this macro will be assembled and linked as - * normal. There are no restrictions on this code. + * 2. Not contain a branch target that is used outside of the + * alternative sequence it is defined in (branches into an + * alternative sequence are not fixed up). + */ + +/* + * Begin an alternative code sequence. */ .macro alternative_if_not cap + .set .Lasm_alt_mode, 0 .pushsection .altinstructions, "a" altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f .popsection 661: .endm +.macro alternative_if cap + .set .Lasm_alt_mode, 1 + .pushsection .altinstructions, "a" + altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f + .popsection + .pushsection .altinstr_replacement, "ax" + .align 2 /* So GAS knows label 661 is suitably aligned */ +661: +.endm + /* - * Provide the alternative code sequence. - * - * The code that follows this macro is assembled into a special - * section to be used for dynamic patching. Code that follows this - * macro must: - * - * 1. Be exactly the same length (in bytes) as the default code - * sequence. - * - * 2. Not contain a branch target that is used outside of the - * alternative sequence it is defined in (branches into an - * alternative sequence are not fixed up). + * Provide the other half of the alternative code sequence. 
*/ .macro alternative_else -662: .pushsection .altinstr_replacement, "ax" +662: + .if .Lasm_alt_mode==0 + .pushsection .altinstr_replacement, "ax" + .else + .popsection + .endif 663: .endm @@ -126,11 +148,25 @@ void free_alternatives_memory(void); * Complete an alternative code sequence. */ .macro alternative_endif -664: .popsection +664: + .if .Lasm_alt_mode==0 + .popsection + .endif .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) .endm +/* + * Provides a trivial alternative or default sequence consisting solely + * of NOPs. The number of NOPs is chosen automatically to match the + * previous case. + */ +.macro alternative_else_nop_endif +alternative_else + nops (662b-661b) / AARCH64_INSN_SIZE +alternative_endif +.endm + #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0ea2d576904..96f2be09f61 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -117,6 +117,15 @@ dmb \opt .endm +/* + * NOP sequence + */ + .macro nops, num + .rept \num + nop + .endr + .endm + #define USER(l, x...) \ 9999: x; \ .section __ex_table,"a"; \ @@ -295,14 +304,6 @@ lr .req x30 // link register .endm /* - * Return the current thread_info. - */ - .macro get_thread_info, rd - mov \rd, sp - and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack - .endm - -/* * Annotate a function as position independent, i.e., safe to be called before * the kernel virtual mapping is activated. */ @@ -313,4 +314,32 @@ lr .req x30 // link register .size __pi_##x, . - x; \ ENDPROC(x) +/* + * Return the current thread_info. + */ + .macro get_thread_info, rd + mov \rd, sp + and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack + .endm + + /* + * mov_q - move an immediate constant into a 64-bit register using + * between 2 and 4 movz/movk instructions (depending on the + * magnitude and sign of the operand) + */ + .macro mov_q, reg, val + .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff) + movz \reg, :abs_g1_s:\val + .else + .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff) + movz \reg, :abs_g2_s:\val + .else + movz \reg, :abs_g3:\val + movk \reg, :abs_g2_nc:\val + .endif + movk \reg, :abs_g1_nc:\val + .endif + movk \reg, :abs_g0_nc:\val + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index e914cf4d553..01df231d622 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -20,6 +20,9 @@ #ifndef __ASSEMBLY__ +#define __nops(n) ".rept " #n "\nnop\n.endr\n" +#define nops(n) asm volatile(__nops(n)) + #define sev() asm volatile("sev" : : : "memory") #define wfe() asm volatile("wfe" : : : "memory") #define wfi() asm volatile("wfi" : : : "memory") diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index a0789bfc4ac..ba6983e901d 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -29,8 +29,9 @@ #define ARM64_HAS_PAN 4 #define ARM64_HAS_UAO 5 #define ARM64_ALT_PAN_NOT_UAO 6 +#define ARM64_UNMAP_KERNEL_AT_EL0 23 -#define ARM64_NCAPS 7 +#define ARM64_NCAPS 24 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index ef572206f1c..180fe12bb33 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -1,6 +1,7 @@ #ifndef _ASM_EFI_H #define _ASM_EFI_H +#include <asm/cpufeature.h> #include <asm/io.h> 
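For reference: the reworked `alternative_*` macros above require both halves of a patched sequence to assemble to the same number of bytes, which is why NOP helpers are added alongside them (`nops` for assembly in assembler.h, `__nops()`/`nops()` for C in barrier.h) and why `alternative_else_nop_endif` can generate a correctly sized default sequence automatically. A minimal sketch of the C-side helpers, assuming an arm64 tree with this patch applied; `pad_with_nops` is an illustrative name, not part of the patch:

```c
#include <asm/barrier.h>	/* __nops()/nops() after this patch */

static inline void pad_with_nops(void)
{
	/* Emit four NOP instructions as a standalone statement */
	nops(4);

	/*
	 * __nops(n) expands to the string ".rept n\nnop\n.endr\n", so it
	 * can be spliced into a larger hand-written asm() sequence.
	 */
	asm volatile("isb\n" __nops(2) "isb" : : : "memory");
}
```

The same trick is what lets the uaccess and entry code later in this diff drop their hand-counted runs of `nop`.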
#include <asm/neon.h> diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index defa0ff9825..6c5bf65c6f4 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -33,6 +33,11 @@ enum fixed_addresses { FIX_HOLE, FIX_EARLYCON_MEM_BASE, +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_DATA, + FIX_ENTRY_TRAMP_TEXT, +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, /* diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 63996bda088..e8272c475bb 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -21,10 +21,7 @@ #include <linux/futex.h> #include <linux/uaccess.h> -#include <asm/alternative.h> -#include <asm/cpufeature.h> #include <asm/errno.h> -#include <asm/sysreg.h> #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ do { \ diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 591c0b0db49..86588793450 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -19,6 +19,7 @@ #ifndef __ASM_KERNEL_PGTABLE_H #define __ASM_KERNEL_PGTABLE_H +#include <asm/page.h> #include <asm/pgtable.h> /* @@ -60,8 +61,16 @@ /* * Initial memory map attributes. */ -#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG) +#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG) +#else +#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS +#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS +#endif #ifdef CONFIG_ARM64_64K_PAGES #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 77c3851deae..e3ffa74e555 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -16,6 +16,11 @@ #ifndef __ASM_MMU_H #define __ASM_MMU_H +#define USER_ASID_FLAG (UL(1) << 48) +#define TTBR_ASID_MASK (UL(0xffff) << 48) + +#ifndef __ASSEMBLY__ + typedef struct { atomic64_t id; void *vdso; @@ -28,6 +33,12 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) +static inline bool arm64_kernel_unmapped_at_el0(void) +{ + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && + cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0); +} + extern void paging_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); extern void init_mem_pgprot(void); @@ -35,4 +46,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot); +#endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 7d31238e287..e55d9a88b64 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -63,6 +63,13 @@ static inline void cpu_set_reserved_ttbr0(void) : "r" (ttbr)); } +static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) +{ + BUG_ON(pgd == swapper_pg_dir); + cpu_set_reserved_ttbr0(); + cpu_do_switch_mm(virt_to_phys(pgd),mm); +} + /* * TCR.T0SZ value to use when the ID map is active. 
Usually equals * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in @@ -128,9 +135,10 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, struct mm_struct *mm) { if (system_uses_ttbr0_pan()) { + u64 ttbr; BUG_ON(mm->pgd == swapper_pg_dir); - task_thread_info(tsk)->ttbr0 = - virt_to_phys(mm->pgd) | ASID(mm) << 48; + ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); } } #else @@ -167,7 +175,8 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, * Update the saved TTBR0_EL1 of the scheduled-in task as the previous * value may have not been initialised yet (activate_mm caller) or the * ASID has changed since the last run (following the context switch - * of another thread of the same process). + * of another thread of the same process). Avoid setting the reserved + * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit). */ if (next != &init_mm) update_saved_ttbr0(tsk, next); @@ -176,4 +185,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, current) +void post_ttbr_update_workaround(void); + #endif diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 5e50782f625..bfe379d13b8 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -164,6 +164,8 @@ #define TCR_TG1_16K (UL(1) << 30) #define TCR_TG1_4K (UL(2) << 30) #define TCR_TG1_64K (UL(3) << 30) + +#define TCR_A1 (UL(1) << 22) #define TCR_ASID16 (UL(1) << 36) #define TCR_TBI0 (UL(1) << 37) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index abe6186dc97..29fba7348bc 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -63,8 +63,16 @@ extern void __pmd_error(const char *file, int line, unsigned long val); extern void __pud_error(const char *file, int line, unsigned long val); extern void __pgd_error(const char *file, int line, unsigned long val); -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG) +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG) +#else +#define PROT_DEFAULT _PROT_DEFAULT +#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) @@ -75,17 +83,18 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) #define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG) #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP) +#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE 
__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) @@ -476,6 +485,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; +extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index b5240e86878..d35dcebb121 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include <asm/memory.h> -#define cpu_switch_mm(pgd,mm) \ -do { \ - BUG_ON(pgd == swapper_pg_dir); \ - cpu_do_switch_mm(virt_to_phys(pgd),mm); \ -} while (0) - #define cpu_get_pgd() \ ({ \ unsigned long pg; \ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index aea291bc519..6a2a5fca90d 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -29,6 +29,7 @@ #include <linux/string.h> +#include <asm/cpufeature.h> #include <asm/fpsimd.h> #include <asm/hw_breakpoint.h> #include <asm/ptrace.h> diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index f9c9aba788c..bc358b52569 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -21,8 +21,6 @@ #include <uapi/asm/ptrace.h> -#define _PSR_PAN_BIT 22 - /* Current Exception Level values, as contained in CurrentEL */ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index d9f04cc4904..88ed6c0e946 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -46,12 +46,12 @@ typedef unsigned long mm_segment_t; struct thread_info { unsigned long flags; /* low level flags */ mm_segment_t addr_limit; /* address limit */ -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - u64 ttbr0; /* saved TTBR0_EL1 */ -#endif struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ struct restart_block restart_block; +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + u64 ttbr0; /* saved TTBR0_EL1 */ +#endif int preempt_count; /* 0 => preemptable, <0 => bug */ int cpu; /* cpu */ }; diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 72753b32f35..461cc32e2fd 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -23,6 +23,30 @@ #include <linux/sched.h> #include <asm/cputype.h> +#include <asm/mmu.h> + +/* + * Raw TLBI operations. + * + * Where necessary, use the __tlbi() macro to avoid asm() + * boilerplate. Drivers and most kernel code should use the TLB + * management routines in preference to the macro below. + * + * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending + * on whether a particular TLBI operation takes an argument or + * not. The macros handles invoking the asm with or without the + * register argument as appropriate. 
+ */ +#define __TLBI_0(op, arg) asm ("tlbi " #op) +#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0" : : "r" (arg)) +#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) + +#define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0) + +#define __tlbi_user(op, arg) do { \ + if (arm64_kernel_unmapped_at_el0()) \ + __tlbi(op, (arg) | USER_ASID_FLAG); \ +} while (0) /* * TLB Management @@ -66,7 +90,7 @@ static inline void local_flush_tlb_all(void) { dsb(nshst); - asm("tlbi vmalle1"); + __tlbi(vmalle1); dsb(nsh); isb(); } @@ -74,7 +98,7 @@ static inline void local_flush_tlb_all(void) static inline void flush_tlb_all(void) { dsb(ishst); - asm("tlbi vmalle1is"); + __tlbi(vmalle1is); dsb(ish); isb(); } @@ -90,7 +114,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm) unsigned long asid = ASID(mm) << 48; dsb(ishst); - asm("tlbi aside1is, %0" : : "r" (asid)); + __tlbi(aside1is, asid); + __tlbi_user(aside1is, asid); dsb(ish); #endif } @@ -107,7 +132,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); dsb(ishst); - asm("tlbi vale1is, %0" : : "r" (addr)); + __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); dsb(ish); #endif } @@ -135,10 +161,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { - if (last_level) - asm("tlbi vale1is, %0" : : "r"(addr)); - else - asm("tlbi vae1is, %0" : : "r"(addr)); + if (last_level) { + __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); + } else { + __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); + } } dsb(ish); } @@ -163,7 +192,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) - asm("tlbi vaae1is, %0" : : "r"(addr)); + __tlbi(vaae1is, addr); dsb(ish); isb(); } @@ -177,7 +206,8 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, { unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); - asm("tlbi vae1is, %0" : : "r" (addr)); + __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); dsb(ish); } diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 55931e8e00c..998ca85840d 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -18,6 +18,11 @@ #ifndef __ASM_UACCESS_H #define __ASM_UACCESS_H +#include <asm/alternative.h> +#include <asm/kernel-pgtable.h> +#include <asm/mmu.h> +#include <asm/sysreg.h> + #ifndef __ASSEMBLY__ /* @@ -27,11 +32,8 @@ #include <linux/string.h> #include <linux/thread_info.h> -#include <asm/alternative.h> #include <asm/cpufeature.h> -#include <asm/kernel-pgtable.h> #include <asm/ptrace.h> -#include <asm/sysreg.h> #include <asm/errno.h> #include <asm/memory.h> #include <asm/compiler.h> @@ -131,19 +133,25 @@ static inline void set_fs(mm_segment_t fs) * User access enabling/disabling. 
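A note on the `__tlbi()`/`__tlbi_user()` pair defined above: with the kernel unmapped at EL0, each address space runs under two ASIDs — an even kernel ASID and its odd user twin selected by `USER_ASID_FLAG` — so every user-visible invalidation is issued twice. The following condensed sketch mirrors the `flush_tlb_page()` change in this hunk; `example_flush_user_page` is an illustrative name and assumes the patched `<asm/tlbflush.h>`:

```c
#include <asm/tlbflush.h>	/* __tlbi(), __tlbi_user() and the helpers used below */

static inline void example_flush_user_page(struct vm_area_struct *vma,
					   unsigned long uaddr)
{
	/* TLBI VALE1IS operand: VA[55:12] in the low bits, ASID in [63:48] */
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);		/* entry tagged with the kernel ASID */
	__tlbi_user(vale1is, addr);	/* odd (user) ASID copy, KPTI builds only */
	dsb(ish);
}
```

When `CONFIG_UNMAP_KERNEL_AT_EL0` is not set, `arm64_kernel_unmapped_at_el0()` is constant-false and the extra invalidation compiles away.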
*/ #ifdef CONFIG_ARM64_SW_TTBR0_PAN -static inline void uaccess_ttbr0_disable(void) +static inline void __uaccess_ttbr0_disable(void) { - unsigned long ttbr; + unsigned long flags, ttbr; + local_irq_save(flags); + ttbr = read_sysreg(ttbr1_el1); + ttbr &= ~TTBR_ASID_MASK; /* reserved_ttbr0 placed at the end of swapper_pg_dir */ - ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; - write_sysreg(ttbr, ttbr0_el1); + write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); + isb(); + /* Set reserved ASID */ + write_sysreg(ttbr, ttbr1_el1); isb(); + local_irq_restore(flags); } -static inline void uaccess_ttbr0_enable(void) +static inline void __uaccess_ttbr0_enable(void) { - unsigned long flags; + unsigned long flags, ttbr0, ttbr1; /* * Disable interrupts to avoid preemption between reading the 'ttbr0' @@ -151,34 +159,58 @@ static inline void uaccess_ttbr0_enable(void) * roll-over and an update of 'ttbr0'. */ local_irq_save(flags); - write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); + ttbr0 = READ_ONCE(current_thread_info()->ttbr0); + + /* Restore active ASID */ + ttbr1 = read_sysreg(ttbr1_el1); + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ + ttbr1 |= ttbr0 & TTBR_ASID_MASK; + write_sysreg(ttbr1, ttbr1_el1); + isb(); + + /* Restore user page table */ + write_sysreg(ttbr0, ttbr0_el1); isb(); local_irq_restore(flags); } + +static inline bool uaccess_ttbr0_disable(void) +{ + if (!system_uses_ttbr0_pan()) + return false; + __uaccess_ttbr0_disable(); + return true; +} + +static inline bool uaccess_ttbr0_enable(void) +{ + if (!system_uses_ttbr0_pan()) + return false; + __uaccess_ttbr0_enable(); + return true; +} #else -static inline void uaccess_ttbr0_disable(void) +static inline bool uaccess_ttbr0_disable(void) { + return false; } -static inline void uaccess_ttbr0_enable(void) +static inline bool uaccess_ttbr0_enable(void) { + return false; } #endif #define __uaccess_disable(alt) \ do { \ - if (system_uses_ttbr0_pan()) \ - uaccess_ttbr0_disable(); \ - else \ + if (!uaccess_ttbr0_disable()) \ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ CONFIG_ARM64_PAN)); \ } while (0) #define __uaccess_enable(alt) \ do { \ - if (system_uses_ttbr0_pan()) \ - uaccess_ttbr0_enable(); \ - else \ + if (!uaccess_ttbr0_enable()) \ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ CONFIG_ARM64_PAN)); \ } while (0) @@ -414,71 +446,73 @@ extern __must_check long strnlen_user(const char __user *str, long n); #else /* __ASSEMBLY__ */ -#include <asm/alternative.h> #include <asm/assembler.h> -#include <asm/kernel-pgtable.h> -#include <asm/page.h> /* * User access enabling/disabling macros. 
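With `uaccess_ttbr0_enable()`/`uaccess_ttbr0_disable()` now returning `bool`, the `__uaccess_enable()`/`__uaccess_disable()` callers above collapse into a fall-through: the TTBR0-switching (software PAN) path is tried first, and the hardware PSTATE.PAN alternative is emitted only when that path is compiled out. A sketch of the resulting pattern — essentially a restatement of `__uaccess_enable()` from this hunk, with `example_uaccess_enable` as an illustrative wrapper name:

```c
#include <asm/uaccess.h>

static inline void example_uaccess_enable(void)
{
	/* Software PAN (TTBR0 switching) handled it... */
	if (!uaccess_ttbr0_enable())
		/* ...otherwise clear PSTATE.PAN on CPUs that have PAN but not UAO */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0),
				ARM64_ALT_PAN_NOT_UAO, CONFIG_ARM64_PAN));
}
```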
*/ - .macro uaccess_ttbr0_disable, tmp1 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + .macro __uaccess_ttbr0_disable, tmp1 mrs \tmp1, ttbr1_el1 // swapper_pg_dir + bic \tmp1, \tmp1, #TTBR_ASID_MASK add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb + sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE + msr ttbr1_el1, \tmp1 // set reserved ASID + isb .endm - .macro uaccess_ttbr0_enable, tmp1 + .macro __uaccess_ttbr0_enable, tmp1, tmp2 get_thread_info \tmp1 - ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1 + ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 + mrs \tmp2, ttbr1_el1 + extr \tmp2, \tmp2, \tmp1, #48 + ror \tmp2, \tmp2, #16 + msr ttbr1_el1, \tmp2 // set the active ASID + isb msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 isb .endm + .macro uaccess_ttbr0_disable, tmp1, tmp2 +alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp2 // avoid preemption + __uaccess_ttbr0_disable \tmp1 + restore_irq \tmp2 +alternative_else_nop_endif + .endm + + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 +alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp3 // avoid preemption + __uaccess_ttbr0_enable \tmp1, \tmp2 + restore_irq \tmp3 +alternative_else_nop_endif + .endm +#else + .macro uaccess_ttbr0_disable, tmp1, tmp2 + .endm + + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 + .endm +#endif + /* * These macros are no-ops when UAO is present. */ - .macro uaccess_disable_not_uao, tmp1 -#ifdef CONFIG_ARM64_SW_TTBR0_PAN -alternative_if_not ARM64_HAS_PAN - uaccess_ttbr0_disable \tmp1 -alternative_else - nop - nop - nop - nop -alternative_endif -#endif -alternative_if_not ARM64_ALT_PAN_NOT_UAO - nop -alternative_else + .macro uaccess_disable_not_uao, tmp1, tmp2 + uaccess_ttbr0_disable \tmp1, \tmp2 +alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(1) -alternative_endif +alternative_else_nop_endif .endm - .macro uaccess_enable_not_uao, tmp1, tmp2 -#ifdef CONFIG_ARM64_SW_TTBR0_PAN -alternative_if_not ARM64_HAS_PAN - save_and_disable_irq \tmp2 // avoid preemption - uaccess_ttbr0_enable \tmp1 - restore_irq \tmp2 -alternative_else - nop - nop - nop - nop - nop - nop - nop - nop -alternative_endif -#endif -alternative_if_not ARM64_ALT_PAN_NOT_UAO - nop -alternative_else + .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 + uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 +alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(0) -alternative_endif +alternative_else_nop_endif .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index b8b4c5109e3..61668a44666 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -14,7 +14,6 @@ #include <linux/slab.h> #include <linux/sysctl.h> -#include <asm/alternative.h> #include <asm/cpufeature.h> #include <asm/insn.h> #include <asm/opcodes.h> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 9b80791e49d..d2bb27d5ef5 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -22,6 +22,7 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/kvm_host.h> +#include <asm/fixmap.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/smp_plat.h> @@ -36,12 +37,12 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - 
DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0)); -#endif DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0)); +#endif BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); BLANK(); @@ -161,5 +162,9 @@ int main(void) DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif + BLANK(); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); +#endif return 0; } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 58347534d76..ec7e68c2c73 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -617,6 +617,39 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry) return feature_matches(val, entry); } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ + +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry) +{ + /* Forced on command line? */ + if (__kpti_forced) { + pr_info_once("kernel page table isolation forced %s by command line option\n", + __kpti_forced > 0 ? "ON" : "OFF"); + return __kpti_forced > 0; + } + + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return true; + + return false; +} + +static int __init parse_kpti(char *str) +{ + bool enabled; + int ret = strtobool(str, &enabled); + + if (ret) + return ret; + + __kpti_forced = enabled ? 1 : -1; + return 0; +} +__setup("kpti=", parse_kpti); +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -654,6 +687,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = cpufeature_pan_not_uao, }, #endif /* CONFIG_ARM64_PAN */ +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + { + .capability = ARM64_UNMAP_KERNEL_AT_EL0, + .matches = unmap_kernel_at_el0, + }, +#endif {}, }; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 348c44be731..1c6b036838e 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -28,7 +28,6 @@ #include <linux/spinlock.h> #include <asm/cacheflush.h> -#include <asm/cpufeature.h> #include <asm/efi.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> @@ -346,12 +345,14 @@ static void efi_set_pgd(struct mm_struct *mm) if (mm != current->active_mm) { /* * Update the current thread's saved ttbr0 since it is - * restored as part of a return from exception. Set - * the hardware TTBR0_EL1 using cpu_switch_mm() - * directly to enable potential errata workarounds. + * restored as part of a return from exception. Enable + * access to the valid TTBR0_EL1 and invoke the errata + * workaround directly since there is no return from + * exception when invoking the EFI run-time services. */ update_saved_ttbr0(current, mm); - cpu_switch_mm(mm->pgd, mm); + uaccess_ttbr0_enable(); + post_ttbr_update_workaround(); } else { /* * Defer the switch to the current thread's TTBR0_EL1 @@ -359,7 +360,7 @@ static void efi_set_pgd(struct mm_struct *mm) * thread's saved ttbr0 corresponding to its active_mm * (if different from init_mm). 
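`unmap_kernel_at_el0()` above is the detector for the new capability: the `kpti=` command-line option (parsed with `strtobool`, so `kpti=0` forces it off and `kpti=1` forces it on) wins if present; otherwise KPTI defaults to on when `CONFIG_RANDOMIZE_BASE` (KASLR) is enabled and to off otherwise in this backport. The rest of the kernel only needs the runtime gate from asm/mmu.h earlier in this diff; a condensed restatement, with `example_kpti_active` as an illustrative name:

```c
#include <asm/cpufeature.h>
#include <asm/mmu.h>

static inline bool example_kpti_active(void)
{
	/* built in *and* the ARM64_UNMAP_KERNEL_AT_EL0 capability was set */
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
```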
*/ - cpu_set_reserved_ttbr0(); + uaccess_ttbr0_disable(); if (current->active_mm != &init_mm) update_saved_ttbr0(current, current->active_mm); } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 35cf3ad495e..af873a84f19 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -28,6 +28,7 @@ #include <asm/errno.h> #include <asm/esr.h> #include <asm/memory.h> +#include <asm/mmu.h> #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/uaccess.h> @@ -69,8 +70,31 @@ #define BAD_FIQ 2 #define BAD_ERROR 3 - .macro kernel_entry, el, regsize = 64 + .macro kernel_ventry, el, label, regsize = 64 + .align 7 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +alternative_if ARM64_UNMAP_KERNEL_AT_EL0 + .if \el == 0 + .if \regsize == 64 + mrs x30, tpidrro_el0 + msr tpidrro_el0, xzr + .else + mov x30, xzr + .endif + .endif +alternative_else_nop_endif +#endif + sub sp, sp, #S_FRAME_SIZE + b el\()\el\()_\label + .endm + + .macro tramp_alias, dst, sym + mov_q \dst, TRAMP_VALIAS + add \dst, \dst, #(\sym - .entry.tramp.text) + .endm + + .macro kernel_entry, el, regsize = 64 .if \regsize == 32 mov w0, w0 // zero upper 32 bits of x0 .endif @@ -118,21 +142,19 @@ * feature as all TTBR0_EL1 accesses are disabled, not just those to * user mappings. */ -alternative_if_not ARM64_HAS_PAN - nop -alternative_else +alternative_if ARM64_HAS_PAN b 1f // skip TTBR0 PAN -alternative_endif +alternative_else_nop_endif .if \el != 0 mrs x21, ttbr0_el1 - tst x21, #0xffff << 48 // Check for the reserved ASID + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR .endif - uaccess_ttbr0_disable x21 + __uaccess_ttbr0_disable x21 1: #endif @@ -174,18 +196,25 @@ alternative_endif * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR * PAN bit checking. */ -alternative_if_not ARM64_HAS_PAN - nop -alternative_else +alternative_if ARM64_HAS_PAN b 2f // skip TTBR0 PAN -alternative_endif +alternative_else_nop_endif .if \el != 0 - tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled + tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set .endif - uaccess_ttbr0_enable x0 + __uaccess_ttbr0_enable x0, x1 + .if \el == 0 + /* + * Enable errata workarounds only if returning to user. The only + * workaround currently required for TTBR0_EL1 changes are for the + * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache + * corruption). + */ + bl post_ttbr_update_workaround + .endif 1: .if \el != 0 and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit @@ -196,24 +225,20 @@ alternative_endif .if \el == 0 ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + tst x22, #PSR_MODE32_BIT // native task? 
+ b.eq 3f + #ifdef CONFIG_ARM64_ERRATUM_845719 -alternative_if_not ARM64_WORKAROUND_845719 - nop - nop -#ifdef CONFIG_PID_IN_CONTEXTIDR - nop -#endif -alternative_else - tbz x22, #4, 1f +alternative_if ARM64_WORKAROUND_845719 #ifdef CONFIG_PID_IN_CONTEXTIDR mrs x29, contextidr_el1 msr contextidr_el1, x29 #else msr contextidr_el1, xzr #endif -1: -alternative_endif +alternative_else_nop_endif #endif +3: .endif msr elr_el1, x21 // set up the return data @@ -235,7 +260,21 @@ alternative_endif ldp x28, x29, [sp, #16 * 14] ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp - eret // return to kernel + + .if \el == 0 +alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + bne 4f + msr far_el1, x30 + tramp_alias x30, tramp_exit_native + br x30 +4: + tramp_alias x30, tramp_exit_compat + br x30 +#endif + .else + eret + .endif .endm /* @@ -267,31 +306,31 @@ tsk .req x28 // current thread_info .align 11 ENTRY(vectors) - ventry el1_sync_invalid // Synchronous EL1t - ventry el1_irq_invalid // IRQ EL1t - ventry el1_fiq_invalid // FIQ EL1t - ventry el1_error_invalid // Error EL1t + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t - ventry el1_sync // Synchronous EL1h - ventry el1_irq // IRQ EL1h - ventry el1_fiq_invalid // FIQ EL1h - ventry el1_error_invalid // Error EL1h + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error_invalid // Error EL1h - ventry el0_sync // Synchronous 64-bit EL0 - ventry el0_irq // IRQ 64-bit EL0 - ventry el0_fiq_invalid // FIQ 64-bit EL0 - ventry el0_error_invalid // Error 64-bit EL0 + kernel_ventry 0, sync // Synchronous 64-bit EL0 + kernel_ventry 0, irq // IRQ 64-bit EL0 + kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 + kernel_ventry 0, error_invalid // Error 64-bit EL0 #ifdef CONFIG_COMPAT - ventry el0_sync_compat // Synchronous 32-bit EL0 - ventry el0_irq_compat // IRQ 32-bit EL0 - ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 - ventry el0_error_invalid_compat // Error 32-bit EL0 + kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0 #else - ventry el0_sync_invalid // Synchronous 32-bit EL0 - ventry el0_irq_invalid // IRQ 32-bit EL0 - ventry el0_fiq_invalid // FIQ 32-bit EL0 - ventry el0_error_invalid // Error 32-bit EL0 + kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 #endif END(vectors) @@ -848,6 +887,117 @@ __ni_sys_trace: bl do_ni_syscall b __sys_trace_return +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. 
+ */ + .pushsection ".entry.tramp.text", "ax" + + .macro tramp_map_kernel, tmp + mrs \tmp, ttbr1_el1 + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + bic \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp +#ifdef CONFIG_ARCH_MSM8996 + /* ASID already in \tmp[63:48] */ + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) + /* 2MB boundary containing the vectors, so we nobble the walk cache */ + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) + isb + tlbi vae1, \tmp + dsb nsh +#endif /* CONFIG_ARCH_MSM8996 */ + .endm + + .macro tramp_unmap_kernel, tmp + mrs \tmp, ttbr1_el1 + add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + orr \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp + /* + * We avoid running the post_ttbr_update_workaround here because the + * user and kernel ASIDs don't have conflicting mappings, so any + * "blessing" as described in: + * + * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com + * + * will not hurt correctness. Whilst this may partially defeat the + * point of using split ASIDs in the first place, it avoids + * the hit of invalidating the entire I-cache on every return to + * userspace. + */ + .endm + + .macro tramp_ventry, regsize = 64 + .align 7 +1: + .if \regsize == 64 + msr tpidrro_el0, x30 // Restored in kernel_ventry + .endif + bl 2f + b . +2: + tramp_map_kernel x30 +#ifdef CONFIG_RANDOMIZE_BASE + adr x30, tramp_vectors + PAGE_SIZE +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 + ldr x30, [x30] +#else + ldr x30, =vectors +#endif + prfm plil1strm, [x30, #(1b - tramp_vectors)] + msr vbar_el1, x30 + add x30, x30, #(1b - tramp_vectors) + isb + ret + .endm + + .macro tramp_exit, regsize = 64 + adr x30, tramp_vectors + msr vbar_el1, x30 + tramp_unmap_kernel x30 + .if \regsize == 64 + mrs x30, far_el1 + .endif + eret + .endm + + .align 11 +ENTRY(tramp_vectors) + .space 0x400 + + tramp_ventry + tramp_ventry + tramp_ventry + tramp_ventry + + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 +END(tramp_vectors) + +ENTRY(tramp_exit_native) + tramp_exit +END(tramp_exit_native) + +ENTRY(tramp_exit_compat) + tramp_exit 32 +END(tramp_exit_compat) + + .ltorg + .popsection // .entry.tramp.text +#ifdef CONFIG_RANDOMIZE_BASE + .pushsection ".rodata", "a" + .align PAGE_SHIFT + .globl __entry_tramp_data_start +__entry_tramp_data_start: + .quad vectors + .popsection // .rodata +#endif /* CONFIG_RANDOMIZE_BASE */ +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + /* * Special system call wrappers. 
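On the trampoline itself: `.entry.tramp.text` ends up mapped twice — once inside the kernel image and once at the fixmap alias `TRAMP_VALIAS`, which is the only kernel text left visible while running on the user page tables. The `tramp_alias` macro above simply rebases a symbol's offset within that page onto the alias; the same arithmetic in C, where `example_tramp_alias` is an illustrative helper and not part of the patch:

```c
#include <asm/fixmap.h>			/* TRAMP_VALIAS, added in this diff */

extern char __entry_tramp_text_start[];	/* start of .entry.tramp.text */

static inline unsigned long example_tramp_alias(void *sym)
{
	unsigned long offset = (unsigned long)sym -
			       (unsigned long)__entry_tramp_text_start;

	return TRAMP_VALIAS + offset;	/* same code, reached via the alias */
}
```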
*/ diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 9d1fd515dd5..05e3585ec2f 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -361,25 +361,18 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, static void tls_thread_switch(struct task_struct *next) { - unsigned long tpidr, tpidrro; - if (!is_compat_task()) { + unsigned long tpidr; asm("mrs %0, tpidr_el0" : "=r" (tpidr)); current->thread.tp_value = tpidr; } - if (is_compat_thread(task_thread_info(next))) { - tpidr = 0; - tpidrro = next->thread.tp_value; - } else { - tpidr = next->thread.tp_value; - tpidrro = 0; - } + if (is_compat_thread(task_thread_info(next))) + write_sysreg(next->thread.tp_value, tpidrro_el0); + else if (!arm64_kernel_unmapped_at_el0()) + write_sysreg(0, tpidrro_el0); - asm( - " msr tpidr_el0, %0\n" - " msr tpidrro_el0, %1" - : : "r" (tpidr), "r" (tpidrro)); + write_sysreg(next->thread.tp_value, tpidr_el0); } /* Restore the UAO state depending on next's addr_limit */ diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index ab14fee1f25..4512d11f200 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -116,7 +116,7 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) */ if (mm == &init_mm) cpu_set_reserved_ttbr0(); - else + else if (!system_uses_ttbr0_pan()) cpu_switch_mm(mm->pgd, mm); local_flush_tlb_all(); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index a38f929fc6d..3a3d0666463 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -40,6 +40,17 @@ jiffies = jiffies_64; *(.hyp.text) \ VMLINUX_SYMBOL(__hyp_text_end) = .; +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_TEXT \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ + *(.entry.tramp.text) \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_end) = .; +#else +#define TRAMP_TEXT +#endif + /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -97,6 +108,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT HYPERVISOR_TEXT + TRAMP_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -167,6 +179,11 @@ SECTIONS . += RESERVED_TTBR0_SIZE; #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_pg_dir = .; + . += PAGE_SIZE; +#endif + _end = .; STABS_DEBUG @@ -179,7 +196,10 @@ SECTIONS */ ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), "HYP init code too big") - +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, + "Entry trampoline text too big") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 08b5f18ba60..07c7ad97ee2 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -17,9 +17,6 @@ */ #include <linux/linkage.h> -#include <asm/assembler.h> -#include <asm/cpufeature.h> -#include <asm/sysreg.h> #include <asm/uaccess.h> .text @@ -33,7 +30,7 @@ * Alignment fixed up by hardware. 
*/ ENTRY(__clear_user) - uaccess_enable_not_uao x2, x3 + uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -53,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 - uaccess_disable_not_uao x2 + uaccess_disable_not_uao x2, x3 ret ENDPROC(__clear_user) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 6505ec81f1d..683adc358be 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -16,10 +16,7 @@ #include <linux/linkage.h> -#include <asm/assembler.h> #include <asm/cache.h> -#include <asm/cpufeature.h> -#include <asm/sysreg.h> #include <asm/uaccess.h> /* @@ -67,10 +64,10 @@ end .req x5 ENTRY(__arch_copy_from_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 9b04ff3ab61..e8bfaf19f77 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -18,10 +18,7 @@ #include <linux/linkage.h> -#include <asm/assembler.h> #include <asm/cache.h> -#include <asm/cpufeature.h> -#include <asm/sysreg.h> #include <asm/uaccess.h> /* @@ -68,10 +65,10 @@ end .req x5 ENTRY(__copy_in_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__copy_in_user) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 8077e4f34d5..f6cfcc0441d 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -16,10 +16,7 @@ #include <linux/linkage.h> -#include <asm/assembler.h> #include <asm/cache.h> -#include <asm/cpufeature.h> -#include <asm/sysreg.h> #include <asm/uaccess.h> /* @@ -66,10 +63,10 @@ end .req x5 ENTRY(__arch_copy_to_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index a0b54501460..59164694626 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -156,7 +156,7 @@ ENTRY(flush_icache_range) * - end - virtual end address of region */ ENTRY(__flush_cache_user_range) - uaccess_enable_not_uao x2, x3 + uaccess_ttbr0_enable x2, x3, x4 dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x0, x3 @@ -178,7 +178,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU 9: // ignore any faulting cache operation dsb ish isb - uaccess_disable_not_uao x1 + uaccess_ttbr0_disable x1, x2 ret ENDPROC(flush_icache_range) ENDPROC(__flush_cache_user_range) @@ -267,7 +267,7 @@ ENDPROC(__dma_clean_range) * - end - virtual end address of region */ ENTRY(__dma_flush_range) - uaccess_enable_not_uao x2, x3 + uaccess_enable_not_uao x2, x3, x4 dcache_line_size x2, x3 sub x3, x2, #1 bic x0, x0, x3 @@ -276,7 +276,7 @@ ENTRY(__dma_flush_range) cmp x0, x1 b.lo 1b dsb sy - uaccess_disable_not_uao x1 + uaccess_disable_not_uao x1, x2 ret ENDPIPROC(__dma_flush_range) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 43cff0e6f52..cc3664b088d 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -38,7 +38,16 @@ 
static cpumask_t tlb_flush_pending; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) -#define NUM_USER_ASIDS ASID_FIRST_VERSION + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) +#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) +#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) +#else +#define NUM_USER_ASIDS (ASID_FIRST_VERSION) +#define asid2idx(asid) ((asid) & ~ASID_MASK) +#define idx2asid(idx) asid2idx(idx) +#endif static void flush_context(unsigned int cpu) { @@ -65,7 +74,7 @@ static void flush_context(unsigned int cpu) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid & ~ASID_MASK, asid_map); + __set_bit(asid2idx(asid), asid_map); per_cpu(reserved_asids, i) = asid; } @@ -76,13 +85,28 @@ static void flush_context(unsigned int cpu) __flush_icache_all(); } -static int is_reserved_asid(u64 asid) +static bool check_update_reserved_asid(u64 asid, u64 newasid) { int cpu; - for_each_possible_cpu(cpu) - if (per_cpu(reserved_asids, cpu) == asid) - return 1; - return 0; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_asids, cpu) == asid) { + hit = true; + per_cpu(reserved_asids, cpu) = newasid; + } + } + + return hit; } static u64 new_context(struct mm_struct *mm, unsigned int cpu) @@ -92,27 +116,29 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) u64 generation = atomic64_read(&asid_generation); if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK); + /* * If our current ASID was active during a rollover, we * can continue to use it and this was just a false alarm. */ - if (is_reserved_asid(asid)) - return generation | (asid & ~ASID_MASK); + if (check_update_reserved_asid(asid, newasid)) + return newasid; /* * We had a valid ASID in a previous life, so try to re-use * it if possible. */ - asid &= ~ASID_MASK; - if (!__test_and_set_bit(asid, asid_map)) - goto bump_gen; + if (!__test_and_set_bit(asid2idx(asid), asid_map)) + return newasid; } /* * Allocate a free ASID. If we can't find one, take a note of the - * currently active ASIDs and mark the TLBs as requiring flushes. - * We always count from ASID #1, as we use ASID #0 when setting a - * reserved TTBR0 for the init_mm. + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. */ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); if (asid != NUM_USER_ASIDS) @@ -129,10 +155,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - -bump_gen: - asid |= generation; - return asid; + return idx2asid(asid) | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) @@ -176,6 +199,11 @@ switch_mm_fastpath: cpu_switch_mm(mm->pgd, mm); } +/* Errata workaround post TTBRx_EL1 update. 
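The allocator changes above halve the usable ASID space and hand ASIDs out in even/odd pairs: the even value is live while the kernel is mapped, and its odd twin (`USER_ASID_FLAG`, i.e. ASID bit 0) is live while running at EL0 with the kernel unmapped. A small worked example of the index/ASID arithmetic, assuming 16-bit ASIDs; the `example_*`/`EXAMPLE_*` names are illustrative only:

```c
#include <linux/bitops.h>	/* GENMASK() */
#include <linux/types.h>

#define EXAMPLE_ASID_BITS	16
#define EXAMPLE_ASID_MASK	(~GENMASK(EXAMPLE_ASID_BITS - 1, 0))

/* allocator index -> kernel ASID: index 1 -> ASID 2, index 2 -> ASID 4, ... */
static inline u64 example_idx2asid(u64 idx)
{
	return (idx << 1) & ~EXAMPLE_ASID_MASK;		/* always even */
}

/* the matching user ASID is just the odd twin */
static inline u64 example_user_asid(u64 kernel_asid)
{
	return kernel_asid | 1;	/* == kernel_asid | (USER_ASID_FLAG >> 48) */
}
```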
*/ +asmlinkage void post_ttbr_update_workaround(void) +{ +} + static int asids_init(void) { int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 32f0ac75b95..bfce642eb1c 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -455,10 +455,10 @@ static const struct fault_info { { do_bad, SIGBUS, 0, "asynchronous external abort" }, { do_bad, SIGBUS, 0, "unknown 18" }, { do_bad, SIGBUS, 0, "unknown 19" }, - { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, - { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, - { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, - { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error" }, { do_bad, SIGBUS, 0, "asynchronous parity error" }, { do_bad, SIGBUS, 0, "unknown 26" }, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 50191538f32..f4fbc3c8cb9 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -582,6 +582,52 @@ void fixup_init(void) PAGE_KERNEL); } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static void __init *pgd_pgtable_alloc(unsigned long size) +{ + void *ptr; + BUG_ON(size != PAGE_SIZE); + + ptr = (void *)__get_free_page(PGALLOC_GFP); + if (!ptr || !pgtable_page_ctor(virt_to_page(ptr))) + BUG(); + + /* Ensure the zeroed page is visible to the page table walker */ + dsb(ishst); + return ptr; +} + +static int __init map_entry_trampoline(void) +{ + extern char __entry_tramp_text_start[]; + + pgprot_t prot = PAGE_KERNEL_EXEC; + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); + + /* The trampoline is always mapped and can therefore be global */ + pgprot_val(prot) &= ~PTE_NG; + + /* Map only the text into the trampoline page table */ + memset(tramp_pg_dir, 0, PGD_SIZE); + __create_mapping(NULL, tramp_pg_dir + pgd_index(TRAMP_VALIAS), pa_start, + TRAMP_VALIAS, PAGE_SIZE, prot, pgd_pgtable_alloc, + false); + + /* Map both the text and data into the kernel page table */ + __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + extern char __entry_tramp_data_start[]; + + __set_fixmap(FIX_ENTRY_TRAMP_DATA, + __pa_symbol(__entry_tramp_data_start), + PAGE_KERNEL); + } + + return 0; +} +core_initcall(map_entry_trampoline); +#endif + /* * paging_init() sets up the page tables, initialises the zone memory * maps and sets up the zero page. diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index d579089a164..b6245c71bd0 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -127,11 +127,17 @@ ENDPROC(cpu_do_resume) * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) + mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id - bfi x0, x1, #48, #16 // set the ASID - msr ttbr0_el1, x0 // set TTBR0 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + bfi x0, x1, #48, #16 // set the ASID field in TTBR0 +#endif + bfi x2, x1, #48, #16 // set the ASID + msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) isb - ret + msr ttbr0_el1, x0 // now update TTBR0 + isb + b post_ttbr_update_workaround // Back to C code... 
ENDPROC(cpu_do_switch_mm) .section ".text.init", #alloc, #execinstr @@ -182,7 +188,7 @@ ENTRY(__cpu_setup) * both user and kernel. */ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 tcr_set_idmap_t0sz x10, x9 /* diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 6d6e4af1a4b..a396beb7829 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -90,7 +90,6 @@ ENTRY(privcmd_call) mov x2, x3 mov x3, x4 mov x4, x5 -#ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Privcmd calls are issued by the userspace. The kernel needs to * enable access to TTBR0_EL1 as the hypervisor would issue stage 1 @@ -99,15 +98,12 @@ ENTRY(privcmd_call) * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation * is enabled (it implies that hardware UAO and PAN disabled). */ - uaccess_enable_not_uao x6, x7 -#endif + uaccess_ttbr0_enable x6, x7, x8 hvc XEN_IMM -#ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Disable userspace access from kernel once the hyp call completed. */ - uaccess_disable_not_uao x6 -#endif + uaccess_ttbr0_disable x6, x7 ret ENDPROC(privcmd_call); diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c index b66c8cb8257..040790a2261 100644 --- a/drivers/char/diag/diag_debugfs.c +++ b/drivers/char/diag/diag_debugfs.c @@ -50,7 +50,7 @@ static int diag_dbgfs_bridgeinfo_index; static int diag_dbgfs_finished; static int diag_dbgfs_dci_data_index; static int diag_dbgfs_dci_finished; - +static struct mutex diag_dci_dbgfs_mutex; static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { @@ -151,6 +151,7 @@ static ssize_t diag_dbgfs_read_dcistats(struct file *file, buf_size = ksize(buf); bytes_remaining = buf_size; + mutex_lock(&diag_dci_dbgfs_mutex); if (diag_dbgfs_dci_data_index == 0) { bytes_written = scnprintf(buf, buf_size, @@ -206,8 +207,8 @@ static ssize_t diag_dbgfs_read_dcistats(struct file *file, } temp_data++; } - diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 
0 : i + 1; + mutex_unlock(&diag_dci_dbgfs_mutex); bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buf); kfree(buf); @@ -1065,6 +1066,7 @@ int diag_debugfs_init(void) pr_warn("diag: could not allocate memory for dci debug info\n"); mutex_init(&dci_stat_mutex); + mutex_init(&diag_dci_dbgfs_mutex); return 0; err: kfree(dci_traffic); @@ -1081,6 +1083,7 @@ void diag_debugfs_cleanup(void) kfree(dci_traffic); mutex_destroy(&dci_stat_mutex); + mutex_destroy(&diag_dci_dbgfs_mutex); } #else int diag_debugfs_init(void) { return 0; } diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 359bf830ac7..39c021420dc 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1691,14 +1691,18 @@ static int diag_ioctl_lsm_deinit(void) { int i; + mutex_lock(&driver->diagchar_mutex); for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == current->tgid) break; - if (i == driver->num_clients) + if (i == driver->num_clients) { + mutex_unlock(&driver->diagchar_mutex); return -EINVAL; + } driver->data_ready[i] |= DEINIT_TYPE; + mutex_unlock(&driver->diagchar_mutex); wake_up_interruptible(&driver->wait_q); return 1; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index d3fd973e877..c96fbb8c804 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -959,6 +959,8 @@ static int usbhid_parse(struct hid_device *hid) unsigned int rsize = 0; char *rdesc; int ret, n; + int num_descriptors; + size_t offset = offsetof(struct hid_descriptor, desc); quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); @@ -981,10 +983,18 @@ static int usbhid_parse(struct hid_device *hid) return -ENODEV; } + if (hdesc->bLength < sizeof(struct hid_descriptor)) { + dbg_hid("hid descriptor is too short\n"); + return -EINVAL; + } + hid->version = le16_to_cpu(hdesc->bcdHID); hid->country = hdesc->bCountryCode; - for (n = 0; n < hdesc->bNumDescriptors; n++) + num_descriptors = min_t(int, hdesc->bNumDescriptors, + (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); + + for (n = 0; n < num_descriptors; n++) if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c index a615c63c582..fd4eac7a982 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c @@ -1060,6 +1060,9 @@ static long msm_flash_subdev_do_ioctl( break; } break; + case VIDIOC_MSM_FLASH_CFG: + pr_err("invalid cmd 0x%x received\n", cmd); + return -EINVAL; default: return msm_flash_subdev_ioctl(sd, cmd, arg); } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c index d6e563b935b..a5c0893666d 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -1023,7 +1023,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, goto error; } - if (rt_tbl->cookie != IPA_COOKIE) { + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } @@ -1044,7 +1044,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, } INIT_LIST_HEAD(&entry->link); entry->rule = *rule; - entry->cookie = IPA_COOKIE; + entry->cookie = 
IPA_FLT_COOKIE; entry->rt_tbl = rt_tbl; entry->tbl = tbl; if (add_rear) { @@ -1063,13 +1063,19 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } *rule_hdl = id; entry->id = id; IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt); return 0; - +ipa_insert_failed: + tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + list_del(&entry->link); + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); error: return -EPERM; } @@ -1085,7 +1091,7 @@ static int __ipa_del_flt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1117,7 +1123,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); goto error; } @@ -1138,7 +1144,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (rt_tbl->cookie != IPA_COOKIE) { + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c index 3d468459122..9fde9337115 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -544,7 +544,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, { struct ipa_hdr_entry *hdr_entry; struct ipa_hdr_proc_ctx_entry *entry; - struct ipa_hdr_proc_ctx_offset_entry *offset; + struct ipa_hdr_proc_ctx_offset_entry *offset = NULL; u32 bin; struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; int id; @@ -559,7 +559,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, } hdr_entry = ipa_id_find(proc_ctx->hdr_hdl); - if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) { + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) { IPAERR("hdr_hdl is invalid\n"); return -EINVAL; } @@ -576,7 +576,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, entry->hdr = hdr_entry; if (add_ref_hdr) hdr_entry->ref_cnt++; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_PROC_HDR_COOKIE; needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ? 
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) : @@ -628,6 +628,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; proc_ctx->proc_ctx_hdl = id; @@ -635,6 +636,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, return 0; +ipa_insert_failed: + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + bad_len: if (add_ref_hdr) hdr_entry->ref_cnt--; @@ -647,7 +656,7 @@ bad_len: static int __ipa_add_hdr(struct ipa_hdr_add *hdr) { struct ipa_hdr_entry *entry; - struct ipa_hdr_offset_entry *offset; + struct ipa_hdr_offset_entry *offset = NULL; u32 bin; struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; int id; @@ -678,7 +687,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->type = hdr->type; entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_HDR_COOKIE; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -761,6 +770,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; hdr->hdr_hdl = id; @@ -785,10 +795,18 @@ fail_add_proc_ctx: entry->ref_cnt--; hdr->hdr_hdl = 0; ipa_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } htbl->hdr_cnt--; list_del(&entry->link); - dma_unmap_single(ipa_ctx->pdev, entry->phys_base, - entry->hdr_len, DMA_TO_DEVICE); bad_hdr_len: entry->cookie = 0; kmem_cache_free(ipa_ctx->hdr_cache, entry); @@ -803,7 +821,7 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; entry = ipa_id_find(proc_ctx_hdl); - if (!entry || (entry->cookie != IPA_COOKIE)) { + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("bad parm\n"); return -EINVAL; } @@ -854,7 +872,7 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_HDR_COOKIE) { IPAERR("bad parm\n"); return -EINVAL; } @@ -1416,7 +1434,7 @@ int ipa2_put_hdr(u32 hdr_hdl) goto bail; } - if (entry == NULL || entry->cookie != IPA_COOKIE) { + if (entry == NULL || entry->cookie != IPA_HDR_COOKIE) { IPAERR("bad params\n"); result = -EINVAL; goto bail; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index 4b9c9edd2f3..8c57be2eb70 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -35,7 +35,15 @@ #define DRV_NAME "ipa" #define NAT_DEV_NAME "ipaNatTable" + #define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + + #define MTU_BYTE 1500 #define IPA_MAX_NUM_PIPES 0x14 @@ -190,8 +198,8 @@ struct ipa_smmu_cb_ctx { */ struct ipa_flt_entry { struct list_head link; - struct ipa_flt_rule rule; u32 cookie; + struct ipa_flt_rule rule; struct ipa_flt_tbl *tbl; struct ipa_rt_tbl *rt_tbl; u32 hw_len; @@ -216,13 +224,13 @@ struct ipa_flt_entry { */ struct 
ipa_rt_tbl { struct list_head link; + u32 cookie; struct list_head head_rt_rule_list; char name[IPA_RESOURCE_NAME_MAX]; u32 idx; u32 rule_cnt; u32 ref_cnt; struct ipa_rt_tbl_set *set; - u32 cookie; bool in_sys; u32 sz; struct ipa_mem_buffer curr_mem; @@ -253,6 +261,7 @@ struct ipa_rt_tbl { */ struct ipa_hdr_entry { struct list_head link; + u32 cookie; u8 hdr[IPA_HDR_MAX_SIZE]; u32 hdr_len; char name[IPA_RESOURCE_NAME_MAX]; @@ -262,7 +271,6 @@ struct ipa_hdr_entry { dma_addr_t phys_base; struct ipa_hdr_proc_ctx_entry *proc_ctx; struct ipa_hdr_offset_entry *offset_entry; - u32 cookie; u32 ref_cnt; int id; u8 is_eth2_ofst_valid; @@ -335,10 +343,10 @@ struct ipa_hdr_proc_ctx_add_hdr_cmd_seq { */ struct ipa_hdr_proc_ctx_entry { struct list_head link; + u32 cookie; enum ipa_hdr_proc_type type; struct ipa_hdr_proc_ctx_offset_entry *offset_entry; struct ipa_hdr_entry *hdr; - u32 cookie; u32 ref_cnt; int id; bool user_deleted; @@ -394,8 +402,8 @@ struct ipa_flt_tbl { */ struct ipa_rt_entry { struct list_head link; - struct ipa_rt_rule rule; u32 cookie; + struct ipa_rt_rule rule; struct ipa_rt_tbl *tbl; struct ipa_hdr_entry *hdr; struct ipa_hdr_proc_ctx_entry *proc_ctx; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index aaefae5db43..a339900f205 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -904,7 +904,7 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, INIT_LIST_HEAD(&entry->link); strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); entry->set = set; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_RT_TBL_COOKIE; entry->in_sys = (ip == IPA_IP_v4) ? !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl; set->tbl_cnt++; @@ -917,12 +917,16 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; } return entry; +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); fail_rt_idx_alloc: entry->cookie = 0; kmem_cache_free(ipa_ctx->rt_tbl_cache, entry); @@ -935,7 +939,7 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) enum ipa_ip_type ip = IPA_IP_MAX; u32 id; - if (entry == NULL || (entry->cookie != IPA_COOKIE)) { + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("bad parms\n"); return -EINVAL; } @@ -949,8 +953,11 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) ip = IPA_IP_v4; else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + return -EPERM; + } + if (!entry->in_sys) { list_del(&entry->link); @@ -989,13 +996,14 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, if (rule->hdr_hdl) { hdr = ipa_id_find(rule->hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) { + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); goto error; } } else if (rule->hdr_proc_ctx_hdl) { proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl); - if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) { + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("rt rule does not point to valid proc ctx\n"); goto error; } @@ -1003,7 +1011,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, tbl = __ipa_add_rt_tbl(ip, name); - if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) { + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("bad params\n"); goto error; } @@ -1024,7 
+1032,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, goto error; } INIT_LIST_HEAD(&entry->link); - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_RT_RULE_COOKIE; entry->rule = *rule; entry->tbl = tbl; entry->hdr = hdr; @@ -1113,7 +1121,7 @@ int __ipa_del_rt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1348,7 +1356,7 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) } mutex_lock(&ipa_ctx->lock); entry = __ipa_find_rt_tbl(lookup->ip, lookup->name); - if (entry && entry->cookie == IPA_COOKIE) { + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { if (entry->ref_cnt == U32_MAX) { IPAERR("fail: ref count crossed limit\n"); goto ret; @@ -1391,7 +1399,7 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl) goto ret; } - if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) { + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { IPAERR("bad parms\n"); result = -EINVAL; goto ret; @@ -1401,8 +1409,11 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl) ip = IPA_IP_v4; else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + result = -EINVAL; + goto ret; + } entry->ref_cnt--; if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { @@ -1429,7 +1440,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) if (rtrule->rule.hdr_hdl) { hdr = ipa_id_find(rtrule->rule.hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) { + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); goto error; } @@ -1441,7 +1452,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); goto error; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index f4307d2bf1a..b88f2b53d2e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -1085,7 +1085,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule, goto error; } - if ((*rt_tbl)->cookie != IPA_COOKIE) { + if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } @@ -1130,7 +1130,7 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, } INIT_LIST_HEAD(&((*entry)->link)); (*entry)->rule = *rule; - (*entry)->cookie = IPA_COOKIE; + (*entry)->cookie = IPA_FLT_COOKIE; (*entry)->rt_tbl = rt_tbl; (*entry)->tbl = tbl; if (rule->rule_id) { @@ -1165,12 +1165,18 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } *rule_hdl = id; entry->id = id; IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); return 0; +ipa_insert_failed: + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + tbl->rule_cnt--; + return -EPERM; } static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, @@ -1196,9 +1202,16 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, list_add(&entry->link, &tbl->head_flt_rule_list); } - __ipa_finish_flt_rule_add(tbl, entry, rule_hdl); + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; return 0; +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + 
idr_remove(&entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); error: return -EPERM; @@ -1230,7 +1243,8 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, list_add(&entry->link, &((*add_after_entry)->link)); - __ipa_finish_flt_rule_add(tbl, entry, rule_hdl); + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; /* * prepare for next insertion @@ -1239,6 +1253,13 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, return 0; +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(&entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + error: *add_after_entry = NULL; return -EPERM; @@ -1255,7 +1276,7 @@ static int __ipa_del_flt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1292,7 +1313,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); goto error; } @@ -1313,7 +1334,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (rt_tbl->cookie != IPA_COOKIE) { + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index 38d37c3022b..ce5c0d410bd 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -331,7 +331,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, } hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl); - if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) { + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) { IPAERR("hdr_hdl is invalid\n"); return -EINVAL; } @@ -348,7 +348,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, entry->hdr = hdr_entry; if (add_ref_hdr) hdr_entry->ref_cnt++; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_PROC_HDR_COOKIE; needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type); @@ -398,6 +398,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; proc_ctx->proc_ctx_hdl = id; @@ -405,6 +406,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, return 0; +ipa_insert_failed: + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + bad_len: if (add_ref_hdr) hdr_entry->ref_cnt--; @@ -417,7 +426,7 @@ bad_len: static int __ipa_add_hdr(struct ipa_hdr_add *hdr) { struct ipa3_hdr_entry *entry; - struct ipa_hdr_offset_entry *offset; + struct ipa_hdr_offset_entry *offset = NULL; u32 bin; struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; int id; @@ -448,7 +457,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->type = hdr->type; entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_HDR_COOKIE; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -522,6 +531,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) if (id < 0) { IPAERR("failed 
to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; hdr->hdr_hdl = id; @@ -546,10 +556,18 @@ fail_add_proc_ctx: entry->ref_cnt--; hdr->hdr_hdl = 0; ipa3_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } htbl->hdr_cnt--; list_del(&entry->link); - dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, - entry->hdr_len, DMA_TO_DEVICE); bad_hdr_len: entry->cookie = 0; kmem_cache_free(ipa3_ctx->hdr_cache, entry); @@ -564,7 +582,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl; entry = ipa3_id_find(proc_ctx_hdl); - if (!entry || (entry->cookie != IPA_COOKIE)) { + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("bad parm\n"); return -EINVAL; } @@ -615,7 +633,7 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_HDR_COOKIE) { IPAERR("bad parm\n"); return -EINVAL; } @@ -1157,7 +1175,7 @@ int ipa3_put_hdr(u32 hdr_hdl) goto bail; } - if (entry == NULL || entry->cookie != IPA_COOKIE) { + if (entry == NULL || entry->cookie != IPA_HDR_COOKIE) { IPAERR("bad params\n"); result = -EINVAL; goto bail; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 2f45ab954f1..6171673cf69 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -39,6 +39,12 @@ #define DRV_NAME "ipa" #define NAT_DEV_NAME "ipaNatTable" #define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + #define MTU_BYTE 1500 #define IPA3_MAX_NUM_PIPES 31 @@ -252,8 +258,8 @@ struct ipa_smmu_cb_ctx { */ struct ipa3_flt_entry { struct list_head link; - struct ipa_flt_rule rule; u32 cookie; + struct ipa_flt_rule rule; struct ipa3_flt_tbl *tbl; struct ipa3_rt_tbl *rt_tbl; u32 hw_len; @@ -281,13 +287,13 @@ struct ipa3_flt_entry { */ struct ipa3_rt_tbl { struct list_head link; + u32 cookie; struct list_head head_rt_rule_list; char name[IPA_RESOURCE_NAME_MAX]; u32 idx; u32 rule_cnt; u32 ref_cnt; struct ipa3_rt_tbl_set *set; - u32 cookie; bool in_sys[IPA_RULE_TYPE_MAX]; u32 sz[IPA_RULE_TYPE_MAX]; struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX]; @@ -319,6 +325,7 @@ struct ipa3_rt_tbl { */ struct ipa3_hdr_entry { struct list_head link; + u32 cookie; u8 hdr[IPA_HDR_MAX_SIZE]; u32 hdr_len; char name[IPA_RESOURCE_NAME_MAX]; @@ -328,7 +335,6 @@ struct ipa3_hdr_entry { dma_addr_t phys_base; struct ipa3_hdr_proc_ctx_entry *proc_ctx; struct ipa_hdr_offset_entry *offset_entry; - u32 cookie; u32 ref_cnt; int id; u8 is_eth2_ofst_valid; @@ -377,10 +383,10 @@ struct ipa3_hdr_proc_ctx_offset_entry { */ struct ipa3_hdr_proc_ctx_entry { struct list_head link; + u32 cookie; enum ipa_hdr_proc_type type; struct ipa3_hdr_proc_ctx_offset_entry *offset_entry; struct ipa3_hdr_entry *hdr; - u32 cookie; u32 ref_cnt; int id; bool user_deleted; @@ -442,8 +448,8 @@ struct ipa3_flt_tbl { */ struct ipa3_rt_entry { struct list_head link; - struct ipa_rt_rule rule; u32 cookie; + struct ipa_rt_rule rule; struct ipa3_rt_tbl *tbl; struct ipa3_hdr_entry *hdr; struct ipa3_hdr_proc_ctx_entry *proc_ctx; diff --git 
a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index faca116f1d4..25d72396010 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -970,7 +970,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, INIT_LIST_HEAD(&entry->link); strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); entry->set = set; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_RT_TBL_COOKIE; entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ? !ipa3_ctx->ip4_rt_tbl_hash_lcl : !ipa3_ctx->ip6_rt_tbl_hash_lcl; @@ -988,12 +988,16 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; } return entry; - +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); + idr_destroy(&entry->rule_ids); fail_rt_idx_alloc: entry->cookie = 0; kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry); @@ -1007,7 +1011,7 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) u32 id; struct ipa3_rt_tbl_set *rset; - if (entry == NULL || (entry->cookie != IPA_COOKIE)) { + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("bad parms\n"); return -EINVAL; } @@ -1021,8 +1025,10 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) ip = IPA_IP_v4; else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + return -EPERM; + } rset = &ipa3_ctx->reap_rt_tbl_set[ip]; @@ -1059,14 +1065,14 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule, if (rule->hdr_hdl) { *hdr = ipa3_id_find(rule->hdr_hdl); - if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) { + if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); return -EPERM; } } else if (rule->hdr_proc_ctx_hdl) { *proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl); if ((*proc_ctx == NULL) || - ((*proc_ctx)->cookie != IPA_COOKIE)) { + ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("rt rule does not point to valid proc ctx\n"); return -EPERM; @@ -1089,7 +1095,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, goto error; } INIT_LIST_HEAD(&(*entry)->link); - (*(entry))->cookie = IPA_COOKIE; + (*(entry))->cookie = IPA_RT_RULE_COOKIE; (*(entry))->rule = *rule; (*(entry))->tbl = tbl; (*(entry))->hdr = hdr; @@ -1153,7 +1159,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, tbl = __ipa_add_rt_tbl(ip, name); - if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) { + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("failed adding rt tbl name = %s\n", name ? name : ""); goto error; @@ -1288,7 +1294,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) mutex_lock(&ipa3_ctx->lock); tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name); - if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) { + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("failed finding rt tbl name = %s\n", rules->rt_tbl_name ? 
rules->rt_tbl_name : ""); ret = -EINVAL; @@ -1372,7 +1378,7 @@ int __ipa3_del_rt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1609,7 +1615,7 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) } mutex_lock(&ipa3_ctx->lock); entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name); - if (entry && entry->cookie == IPA_COOKIE) { + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { if (entry->ref_cnt == U32_MAX) { IPAERR("fail: ref count crossed limit\n"); goto ret; @@ -1652,7 +1658,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl) goto ret; } - if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) { + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { IPAERR("bad parms\n"); result = -EINVAL; goto ret; @@ -1662,8 +1668,10 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl) ip = IPA_IP_v4; else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + goto ret; + } entry->ref_cnt--; if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { @@ -1691,13 +1699,14 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) if (rtrule->rule.hdr_hdl) { hdr = ipa3_id_find(rtrule->rule.hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) { + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); goto error; } } else if (rtrule->rule.hdr_proc_ctx_hdl) { proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl); - if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) { + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("rt rule does not point to valid proc ctx\n"); goto error; } @@ -1709,7 +1718,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); goto error; } diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c index c817acf1bb2..a46d47e1cf9 100644 --- a/drivers/power/qcom/msm-core.c +++ b/drivers/power/qcom/msm-core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -407,9 +407,10 @@ static int update_userspace_power(struct sched_params __user *argp) if (!sp) return -ENOMEM; - + mutex_lock(&policy_update_mutex); sp->power = allocate_2d_array_uint32_t(node->sp->num_of_freqs); if (IS_ERR_OR_NULL(sp->power)) { + mutex_unlock(&policy_update_mutex); ret = PTR_ERR(sp->power); kfree(sp); return ret; @@ -453,6 +454,7 @@ static int update_userspace_power(struct sched_params __user *argp) } } spin_unlock(&update_lock); + mutex_unlock(&policy_update_mutex); for_each_possible_cpu(cpu) { if (pdata_valid[cpu]) @@ -466,6 +468,7 @@ static int update_userspace_power(struct sched_params __user *argp) return 0; failed: + mutex_unlock(&policy_update_mutex); for (i = 0; i < TEMP_DATA_POINTS; i++) kfree(sp->power[i]); kfree(sp->power); diff --git a/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/TLSHIM/tl_shim.c b/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/TLSHIM/tl_shim.c index 58441b0e956..a9f8a027bcf 100644 --- a/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/TLSHIM/tl_shim.c +++ b/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/TLSHIM/tl_shim.c @@ -641,6 +641,16 @@ static int tlshim_mgmt_rx_process(void *context, u_int8_t *data, rx_pkt->pkt_meta.mpdu_hdr_len; /* + * If the mpdu_data_len is greater than Max (2k), drop the frame + */ + if (rx_pkt->pkt_meta.mpdu_data_len > WMA_MAX_MGMT_MPDU_LEN) { + TLSHIM_LOGE("Data Len %d greater than max, dropping frame", + rx_pkt->pkt_meta.mpdu_data_len); + vos_mem_free(rx_pkt); + return 0; + } + + /* * saved_beacon means this beacon is a duplicate of one * sent earlier. roamCandidateInd flag is used to indicate to * PE that roam scan finished and a better candidate AP diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c index c80a5a16f03..f370be7c8fa 100644 --- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c +++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c @@ -1141,6 +1141,7 @@ static void oem_request_dispatcher(tAniMsgHdr *msg_hdr, int pid) static void oem_cmd_handler(const void *data, int data_len, void *ctx, int pid) { tAniMsgHdr *msg_hdr; + int msg_len; int ret; struct nlattr *tb[CLD80211_ATTR_MAX + 1]; @@ -1150,6 +1151,10 @@ static void oem_cmd_handler(const void *data, int data_len, void *ctx, int pid) return; } + /* + * audit note: it is ok to pass a NULL policy here since only + * one attribute is parsed and it is explicitly validated + */ if (nla_parse(tb, CLD80211_ATTR_MAX, data, data_len, NULL)) { hddLog(LOGE, FL("Invalid ATTR")); return; @@ -1160,15 +1165,22 @@ static void oem_cmd_handler(const void *data, int data_len, void *ctx, int pid) return; } - msg_hdr = (tAniMsgHdr *)nla_data(tb[CLD80211_ATTR_DATA]); - if (!msg_hdr) { - hddLog(LOGE, FL("msg_hdr null")); + msg_len = nla_len(tb[CLD80211_ATTR_DATA]); + if (msg_len < sizeof(*msg_hdr)) { + hddLog(LOGE, FL("runt ATTR_DATA size %d"), msg_len); send_oem_err_rsp_nlink_msg(pid, OEM_ERR_NULL_MESSAGE_HEADER); return; } - oem_request_dispatcher(msg_hdr, pid); - return; + msg_hdr = nla_data(tb[CLD80211_ATTR_DATA]); + if (msg_len < (sizeof(*msg_hdr) + msg_hdr->length)) { + hddLog(LOGE, FL("Invalid nl msg len %d, msg hdr len %d"), + msg_len, msg_hdr->length); + send_oem_err_rsp_nlink_msg(pid, OEM_ERR_INVALID_MESSAGE_LENGTH); + return; + } + + oem_request_dispatcher(msg_hdr, pid); } /** diff --git 
a/drivers/staging/qcacld-2.0/CORE/MAC/src/include/dot11f.h b/drivers/staging/qcacld-2.0/CORE/MAC/src/include/dot11f.h index 686786ff15f..4beee9705fc 100644 --- a/drivers/staging/qcacld-2.0/CORE/MAC/src/include/dot11f.h +++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/include/dot11f.h @@ -36,7 +36,7 @@ * * * This file was automatically generated by 'framesc' - * Tue Jul 4 11:07:27 2017 from the following file(s): + * Wed Jul 12 16:02:49 2017 from the following file(s): * * dot11f.frms * diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessActionFrame.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessActionFrame.c index 210e38b7fe1..468f3b89fff 100644 --- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessActionFrame.c +++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessActionFrame.c @@ -2429,6 +2429,12 @@ limProcessActionFrame(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo,tpPESession ps pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo); frameLen = WDA_GET_RX_PAYLOAD_LEN(pRxPacketInfo); + if (frameLen < sizeof(pPubAction)) { + limLog(pMac, LOG1, + FL("Received action frame of invalid len %d"), frameLen); + break; + } + //Check if it is a P2P public action frame. if (vos_mem_compare(pPubAction->Oui, P2POui, 4)) { @@ -2606,6 +2612,12 @@ limProcessActionFrameNoSession(tpAniSirGlobal pMac, tANI_U8 *pBd) pHdr = WDA_GET_RX_MAC_HEADER(pBd); frameLen = WDA_GET_RX_PAYLOAD_LEN(pBd); + if (frameLen < sizeof(pActionHdr)) { + limLog(pMac, LOG1, + FL("Received action frame of invalid len %d"), frameLen); + break; + } + //Check if it is a P2P public action frame. if (vos_mem_compare(pActionHdr->Oui, P2POui, 4)) { diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessAuthFrame.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessAuthFrame.c index 0e9a9ccfdb3..646c2829395 100644 --- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessAuthFrame.c +++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessAuthFrame.c @@ -297,7 +297,8 @@ limProcessAuthFrame(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo, tpPESession pse goto free; } - if (frameLen < LIM_ENCR_AUTH_BODY_LEN_SAP) + if ((frameLen < LIM_ENCR_AUTH_BODY_LEN_SAP) || + (frameLen > LIM_ENCR_AUTH_BODY_LEN_SAP)) { // Log error limLog(pMac, LOGE, diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c index 39f0112ed8e..84cee883515 100644 --- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c +++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c @@ -6721,8 +6721,17 @@ limUpdateIBssPropAddIEs(tpAniSirGlobal pMac, tANI_U8 **pDstData_buff, vos_mem_copy(vendor_ie, pModifyIE->pIEBuffer, pModifyIE->ieBufferlength); } else { - uint16_t new_length = pModifyIE->ieBufferlength + *pDstDataLen; - uint8_t *new_ptr = vos_mem_malloc(new_length); + uint8_t *new_ptr; + uint16_t new_length; + + if (USHRT_MAX - pModifyIE->ieBufferlength < *pDstDataLen) { + limLog(pMac,LOGE,FL("U16 overflow due to %d + %d"), + pModifyIE->ieBufferlength, *pDstDataLen); + return false; + } + + new_length = pModifyIE->ieBufferlength + *pDstDataLen; + new_ptr = vos_mem_malloc(new_length); if (NULL == new_ptr) { limLog(pMac, LOGE, FL("Memory allocation failed.")); diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c index 1c7e3fdb592..fc20de998e2 100644 --- a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c +++ 
b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c @@ -4780,7 +4780,8 @@ static int wma_unified_link_peer_stats_event_handler(void *handle, wmi_rate_stats *rate_stats; tSirLLStatsResults *link_stats_results; u_int8_t *results, *t_peer_stats, *t_rate_stats; - u_int32_t count, num_rates=0, rate_cnt; + u_int32_t count, rate_cnt; + uint32_t total_num_rates = 0; u_int32_t next_res_offset, next_peer_offset, next_rate_offset; size_t peer_info_size, peer_stats_size, rate_stats_size; size_t link_stats_results_size; @@ -4810,8 +4811,8 @@ static int wma_unified_link_peer_stats_event_handler(void *handle, * cmd_param_info contains * wmi_peer_stats_event_fixed_param fixed_param; * num_peers * size of(struct wmi_peer_link_stats) - * num_rates * size of(struct wmi_rate_stats) - * num_rates is the sum of the rates of all the peers. + * total_num_rates * size of(struct wmi_rate_stats) + * total_num_rates is the sum of the rates of all the peers. */ fixed_param = param_tlvs->fixed_param; peer_stats = param_tlvs->peer_stats; @@ -4824,22 +4825,33 @@ static int wma_unified_link_peer_stats_event_handler(void *handle, } do { - if (peer_stats->num_rates > - WMA_SVC_MSG_MAX_SIZE/sizeof(wmi_rate_stats)) { - excess_data = true; - break; - } else { - buf_len = peer_stats->num_rates * - sizeof(wmi_rate_stats); - } if (fixed_param->num_peers > WMA_SVC_MSG_MAX_SIZE/sizeof(wmi_peer_link_stats)) { excess_data = true; break; } else { - buf_len += fixed_param->num_peers * + buf_len = fixed_param->num_peers * sizeof(wmi_peer_link_stats); } + temp_peer_stats = (wmi_peer_link_stats *) peer_stats; + for (count = 0; count < fixed_param->num_peers; count++) { + if (temp_peer_stats->num_rates > + WMA_SVC_MSG_MAX_SIZE / sizeof(wmi_rate_stats)) { + excess_data = true; + break; + } else { + total_num_rates += temp_peer_stats->num_rates; + if (total_num_rates > + WMA_SVC_MSG_MAX_SIZE / + sizeof(wmi_rate_stats)) { + excess_data = true; + break; + } + buf_len += temp_peer_stats->num_rates * + sizeof(wmi_rate_stats); + } + temp_peer_stats++; + } } while (0); if (excess_data || @@ -4850,21 +4862,12 @@ static int wma_unified_link_peer_stats_event_handler(void *handle, return -EINVAL; } - /* - * num_rates - sum of the rates of all the peers - */ - temp_peer_stats = (wmi_peer_link_stats*)peer_stats; - for (count = 0; count < fixed_param->num_peers; count++) { - num_rates += temp_peer_stats->num_rates; - temp_peer_stats++; - } - peer_stats_size = sizeof(tSirWifiPeerStat); peer_info_size = sizeof(tSirWifiPeerInfo); rate_stats_size = sizeof(tSirWifiRateStat); link_stats_results_size = sizeof(*link_stats_results) + peer_stats_size + (fixed_param->num_peers * peer_info_size) + - (num_rates * rate_stats_size); + (total_num_rates * rate_stats_size); link_stats_results = vos_mem_malloc(link_stats_results_size); if (NULL == link_stats_results ) { @@ -5573,6 +5576,12 @@ static void wma_send_bcn_buf_ll(tp_wma_handle wma, WMA_LOGE("%s: Invalid beacon buffer", __func__); return; } + if (WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info) > + WMI_P2P_MAX_NOA_DESCRIPTORS) { + WMA_LOGE("%s: Too many descriptors %d", __func__, + WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info)); + return; + } wmi_buf = wmi_buf_alloc(wma->wmi_handle, sizeof(*cmd)); if (!wmi_buf) { @@ -6228,6 +6237,12 @@ static int wma_p2p_noa_event_handler(void *handle, u_int8_t *event, u_int32_t le descriptors = WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info); noa_ie.num_descriptors = (u_int8_t)descriptors; + if (noa_ie.num_descriptors > WMA_MAX_NOA_DESCRIPTORS) { + WMA_LOGD("Sizing down the no 
of desc %d to max", + noa_ie.num_descriptors); + noa_ie.num_descriptors = WMA_MAX_NOA_DESCRIPTORS; + } + WMA_LOGI("%s: index %u, oppPs %u, ctwindow %u, " "num_descriptors = %u", __func__, noa_ie.index, noa_ie.oppPS, noa_ie.ctwindow, noa_ie.num_descriptors); @@ -32363,6 +32378,11 @@ static int wma_nlo_match_evt_handler(void *handle, u_int8_t *event, nlo_event = param_buf->fixed_param; WMA_LOGD("PNO match event received for vdev %d", nlo_event->vdev_id); + if (nlo_event->vdev_id >= wma->max_bssid) { + WMA_LOGE("Invalid vdev id in the NLO event %d", + nlo_event->vdev_id); + return -EINVAL; + } node = &wma->interfaces[nlo_event->vdev_id]; if (node) @@ -32511,7 +32531,8 @@ static int wma_mcc_vdev_tx_pause_evt_handler(void *handle, u_int8_t *event, /* FW mapped vdev from ID * vdev_map = (1 << vdev_id) * So, host should unmap to ID */ - for (vdev_id = 0; vdev_map != 0; vdev_id++) + for (vdev_id = 0; vdev_map != 0 && vdev_id < wma->max_bssid; + vdev_id++) { if (!(vdev_map & 0x1)) { @@ -35685,6 +35706,14 @@ wma_process_utf_event(WMA_HANDLE handle, currentSeq); } + if ((datalen > MAX_UTF_EVENT_LENGTH) || + (wma_handle->utf_event_info.offset > + (MAX_UTF_EVENT_LENGTH - datalen))) { + WMA_LOGE("Excess data from firmware, offset:%zu, len:%d", + wma_handle->utf_event_info.offset, datalen); + return -EINVAL; + } + memcpy(&wma_handle->utf_event_info.data[wma_handle->utf_event_info.offset], &data[sizeof(segHdrInfo)], datalen); diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h index ce78c324d51..5dd315b2104 100644 --- a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h +++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h @@ -101,6 +101,8 @@ #define FRAGMENT_SIZE 3072 +#define WMA_MAX_MGMT_MPDU_LEN 2000 + #define WMA_INVALID_VDEV_ID 0xFF #define MAX_MEM_CHUNKS 32 #define WMA_MAX_VDEV_SIZE 20 diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c b/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c index c4cf992c3c2..797a63f7907 100644 --- a/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c +++ b/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c @@ -1667,7 +1667,7 @@ eHalStatus sme_SetPlmRequest(tHalHandle hHal, tpSirPlmReq pPlmReq) eHalStatus status; tANI_BOOLEAN ret = eANI_BOOLEAN_FALSE; tpAniSirGlobal pMac = PMAC_STRUCT(hHal); - tANI_U8 ch_list[WNI_CFG_VALID_CHANNEL_LIST] = {0}; + tANI_U8 ch_list[WNI_CFG_VALID_CHANNEL_LIST_LEN] = {0}; tANI_U8 count, valid_count = 0; vos_msg_t msg; diff --git a/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/dot11f.c b/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/dot11f.c index ae53eceedbf..2a949e3bd72 100644 --- a/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/dot11f.c +++ b/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/dot11f.c @@ -35,7 +35,7 @@ * * * This file was automatically generated by 'framesc' - * Tue Jul 4 11:07:27 2017 from the following file(s): + * Wed Jul 12 16:02:49 2017 from the following file(s): * * dot11f.frms * @@ -476,7 +476,7 @@ static tANI_U32 GetContainerIesLen(tpAniSirGlobal pCtx, { const tIEDefn *pIe, *pIeFirst; tANI_U8 *pBufRemaining = pBuf; - tANI_U8 len = 0; + tANI_U32 len = 0; (void)pCtx; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 1fb6303b918..2ecf6360171 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -536,15 +536,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, } else if (header->bDescriptorType 
== USB_DT_INTERFACE_ASSOCIATION) { + struct usb_interface_assoc_descriptor *d; + + d = (struct usb_interface_assoc_descriptor *)header; + if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { + dev_warn(ddev, + "config %d has an invalid interface association descriptor of length %d, skipping\n", + cfgno, d->bLength); + continue; + } + if (iad_num == USB_MAXIADS) { dev_warn(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); } else { - config->intf_assoc[iad_num] = - (struct usb_interface_assoc_descriptor - *)header; + config->intf_assoc[iad_num] = d; iad_num++; } diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index a451903a685..a0b92096f6f 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf) intf->desc.bInterfaceProtocol == USB_PR_UAS); } -static int uas_find_uas_alt_setting(struct usb_interface *intf) +static struct usb_host_interface *uas_find_uas_alt_setting( + struct usb_interface *intf) { int i; @@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf) struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) - return alt->desc.bAlternateSetting; + return alt; } - return -ENODEV; + return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, @@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); unsigned long flags = id->driver_info; - int r, alt; - + struct usb_host_interface *alt; + int r; alt = uas_find_uas_alt_setting(intf); - if (alt < 0) + if (!alt) return 0; - r = uas_find_endpoints(&intf->altsetting[alt], eps); + r = uas_find_endpoints(alt, eps); if (r < 0) return 0; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 1fc03ef3d43..40533ce2be4 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -866,14 +866,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids); static int uas_switch_interface(struct usb_device *udev, struct usb_interface *intf) { - int alt; + struct usb_host_interface *alt; alt = uas_find_uas_alt_setting(intf); - if (alt < 0) - return alt; + if (!alt) + return -ENODEV; - return usb_set_interface(udev, - intf->altsetting[0].desc.bInterfaceNumber, alt); + return usb_set_interface(udev, alt->desc.bInterfaceNumber, + alt->desc.bAlternateSetting); } static int uas_configure_endpoints(struct uas_dev_info *devinfo) diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index aa33fd1b2d4..400196c45b3 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -705,6 +705,7 @@ struct usb_interface_assoc_descriptor { __u8 iFunction; } __attribute__ ((packed)); +#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8 /*-------------------------------------------------------------------------*/ diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c index 10e27c813e8..fad28c4283c 100644 --- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c @@ -123,6 +123,7 @@ struct msm_compr_pdata { bool use_legacy_api; /* indicates use older asm apis*/ struct msm_compr_dec_params *dec_params[MSM_FRONTEND_DAI_MAX]; struct msm_compr_ch_map *ch_map[MSM_FRONTEND_DAI_MAX]; + bool is_in_use[MSM_FRONTEND_DAI_MAX]; }; struct msm_compr_audio { @@ -1113,11 +1114,16 @@ 
static int msm_compr_open(struct snd_compr_stream *cstream) { struct snd_compr_runtime *runtime = cstream->runtime; struct snd_soc_pcm_runtime *rtd = cstream->private_data; - struct msm_compr_audio *prtd; + struct msm_compr_audio *prtd = NULL; struct msm_compr_pdata *pdata = snd_soc_platform_get_drvdata(rtd->platform); pr_debug("%s\n", __func__); + if (pdata->is_in_use[rtd->dai_link->be_id] == true) { + pr_err("%s: %s is already in use,err: %d ", + __func__, rtd->dai_link->cpu_dai_name, -EBUSY); + return -EBUSY; + } prtd = kzalloc(sizeof(struct msm_compr_audio), GFP_KERNEL); if (prtd == NULL) { pr_err("Failed to allocate memory for msm_compr_audio\n"); @@ -1129,7 +1135,7 @@ static int msm_compr_open(struct snd_compr_stream *cstream) pdata->cstream[rtd->dai_link->be_id] = cstream; pdata->audio_effects[rtd->dai_link->be_id] = kzalloc(sizeof(struct msm_compr_audio_effects), GFP_KERNEL); - if (!pdata->audio_effects[rtd->dai_link->be_id]) { + if (pdata->audio_effects[rtd->dai_link->be_id] == NULL) { pr_err("%s: Could not allocate memory for effects\n", __func__); pdata->cstream[rtd->dai_link->be_id] = NULL; kfree(prtd); @@ -1137,10 +1143,11 @@ static int msm_compr_open(struct snd_compr_stream *cstream) } pdata->dec_params[rtd->dai_link->be_id] = kzalloc(sizeof(struct msm_compr_dec_params), GFP_KERNEL); - if (!pdata->dec_params[rtd->dai_link->be_id]) { + if (pdata->dec_params[rtd->dai_link->be_id] == NULL) { pr_err("%s: Could not allocate memory for dec params\n", __func__); kfree(pdata->audio_effects[rtd->dai_link->be_id]); + pdata->audio_effects[rtd->dai_link->be_id] = NULL; pdata->cstream[rtd->dai_link->be_id] = NULL; kfree(prtd); return -ENOMEM; @@ -1188,7 +1195,9 @@ static int msm_compr_open(struct snd_compr_stream *cstream) if (!prtd->audio_client) { pr_err("%s: Could not allocate memory for client\n", __func__); kfree(pdata->audio_effects[rtd->dai_link->be_id]); + pdata->audio_effects[rtd->dai_link->be_id] = NULL; kfree(pdata->dec_params[rtd->dai_link->be_id]); + pdata->dec_params[rtd->dai_link->be_id] = NULL; pdata->cstream[rtd->dai_link->be_id] = NULL; runtime->private_data = NULL; kfree(prtd); @@ -1198,7 +1207,7 @@ static int msm_compr_open(struct snd_compr_stream *cstream) pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session); prtd->audio_client->perf_mode = false; prtd->session_id = prtd->audio_client->session; - + pdata->is_in_use[rtd->dai_link->be_id] = true; return 0; } @@ -1288,10 +1297,15 @@ static int msm_compr_free(struct snd_compr_stream *cstream) q6asm_audio_client_free(ac); - kfree(pdata->audio_effects[soc_prtd->dai_link->be_id]); - pdata->audio_effects[soc_prtd->dai_link->be_id] = NULL; - kfree(pdata->dec_params[soc_prtd->dai_link->be_id]); - pdata->dec_params[soc_prtd->dai_link->be_id] = NULL; + if (pdata->audio_effects[soc_prtd->dai_link->be_id] != NULL) { + kfree(pdata->audio_effects[soc_prtd->dai_link->be_id]); + pdata->audio_effects[soc_prtd->dai_link->be_id] = NULL; + } + if (pdata->dec_params[soc_prtd->dai_link->be_id] != NULL) { + kfree(pdata->dec_params[soc_prtd->dai_link->be_id]); + pdata->dec_params[soc_prtd->dai_link->be_id] = NULL; + } + pdata->is_in_use[soc_prtd->dai_link->be_id] = false; kfree(prtd); runtime->private_data = NULL; @@ -2923,6 +2937,7 @@ static int msm_compr_probe(struct snd_soc_platform *platform) pdata->dec_params[i] = NULL; pdata->cstream[i] = NULL; pdata->ch_map[i] = NULL; + pdata->is_in_use[i] = false; } snd_soc_add_platform_controls(platform, msm_compr_gapless_controls, diff --git a/sound/usb/card.c b/sound/usb/card.c 
index 8c11ad2ca28..6cd163b4394 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) struct usb_interface *usb_iface; void *control_header; int i, protocol; + int rest_bytes; usb_iface = usb_ifnum_to_if(dev, ctrlif); if (!usb_iface) { @@ -247,6 +248,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) return -EINVAL; } + rest_bytes = (void *)(host_iface->extra + host_iface->extralen) - + control_header; + + /* just to be sure -- this shouldn't hit at all */ + if (rest_bytes <= 0) { + dev_err(&dev->dev, "invalid control header\n"); + return -EINVAL; + } + switch (protocol) { default: dev_warn(&dev->dev, @@ -257,11 +267,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) case UAC_VERSION_1: { struct uac1_ac_header_descriptor *h1 = control_header; + if (rest_bytes < sizeof(*h1)) { + dev_err(&dev->dev, "too short v1 buffer descriptor\n"); + return -EINVAL; + } + if (!h1->bInCollection) { dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); return -EINVAL; } + if (rest_bytes < h1->bLength) { + dev_err(&dev->dev, "invalid buffer length (v1)\n"); + return -EINVAL; + } + if (h1->bLength < sizeof(*h1) + h1->bInCollection) { dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); return -EINVAL; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 1f1dc5c485a..8cedec1c151 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2165,6 +2165,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) { + /* kill pending URBs */ + snd_usb_mixer_disconnect(&mixer->list); + kfree(mixer->id_elems); if (mixer->urb) { kfree(mixer->urb->transfer_buffer); @@ -2501,8 +2504,13 @@ void snd_usb_mixer_disconnect(struct list_head *p) struct usb_mixer_interface *mixer; mixer = list_entry(p, struct usb_mixer_interface, list); - usb_kill_urb(mixer->urb); - usb_kill_urb(mixer->rc_urb); + if (mixer->disconnected) + return; + if (mixer->urb) + usb_kill_urb(mixer->urb); + if (mixer->rc_urb) + usb_kill_urb(mixer->rc_urb); + mixer->disconnected = true; } #ifdef CONFIG_PM diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 73b1f649447..cdff31de12d 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -23,6 +23,7 @@ struct usb_mixer_interface { u8 audigy2nx_leds[3]; u8 xonar_u1_status; + bool disconnected; }; #define MAX_CHANNELS 16 /* max logical channels */ |
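
Editorial note: several hunks above (usbhid_parse, usb_parse_configuration, snd_usb_create_streams, the CLD80211 OEM netlink handler, and the LIM action-frame checks) apply the same hardening pattern: before walking a variable-length, device- or peer-supplied structure, validate its self-declared length against the fixed header size and clamp or reject the declared element count so it never exceeds the bytes actually present. The standalone C sketch below is not kernel code; the struct layout, field names, and the helper clamped_num_descriptors() are invented here purely for illustration, but the clamp-by-bLength idea matches what the patches rely on.

/*
 * Hypothetical illustration of the "never trust the declared count"
 * pattern used in these fixes. The types and names are made up; only
 * the bounds-checking logic mirrors the patched kernel paths.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sub_desc {
	uint8_t  type;
	uint16_t len;
} __attribute__((packed));

struct class_desc {
	uint8_t bLength;          /* total length the device claims */
	uint8_t bDescriptorType;
	uint8_t bNumDescriptors;  /* element count the device claims */
	struct sub_desc desc[];   /* variable-length tail */
} __attribute__((packed));

/*
 * Return how many sub-descriptors are actually backed by bytes inside
 * the descriptor, or -1 if the descriptor is shorter than its own
 * fixed header (the "too short" rejection case).
 */
static int clamped_num_descriptors(const struct class_desc *d)
{
	size_t header = offsetof(struct class_desc, desc);
	size_t room, claimed;

	if (d->bLength < header)
		return -1;

	room = (d->bLength - header) / sizeof(struct sub_desc);
	claimed = d->bNumDescriptors;

	return (int)(claimed < room ? claimed : room);
}

int main(void)
{
	/* A hostile descriptor: claims 200 entries but carries only one. */
	unsigned char blob[offsetof(struct class_desc, desc) +
			   sizeof(struct sub_desc)];
	struct class_desc *d = (struct class_desc *)blob;
	int n;

	memset(blob, 0, sizeof(blob));
	d->bLength = (uint8_t)sizeof(blob);
	d->bDescriptorType = 0x21;
	d->bNumDescriptors = 200;

	n = clamped_num_descriptors(d);
	printf("claimed %d, safe to walk %d\n", d->bNumDescriptors, n);
	return 0;
}

The individual patches choose slightly different responses once the mismatch is detected: the HID parser clamps the loop bound, the interface-association fix skips the malformed descriptor, and the USB-audio and netlink paths bail out with -EINVAL. The common element is that the loop bound is derived from the bytes actually received, never from a count field the other side controls.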
