From 9dfc28b6308096e48b54c28259825a1200f60742 Mon Sep 17 00:00:00 2001
From: Jonathan Austin
Date: Thu, 18 Apr 2013 18:37:24 +0100
Subject: ARM: mpu: protect the vectors page with an MPU region

Without an MMU it is possible for userspace programs to start executing code
in places that they have no business executing. The MPU allows some level of
protection against this.

This patch protects the vectors page from access by userspace processes.
Userspace tasks that dereference a null pointer are already protected by an
svc at 0x0 that kills them. However when tasks use an offset from a null
pointer (eg a function in a null struct) they miss this carefully placed svc
and enter the exception vectors in user mode, ending up in the kernel.

This patch causes programs that do this to receive a SEGV instead of happily
entering the kernel in user-mode, and hence avoid a 'Bad Mode' panic.

As part of this change it is necessary to make sigreturn happen via the stack
when there is not an sa_restorer function. This change is invisible to
userspace, and irrelevant to code compiled using a uClibc toolchain, which
always uses an sa_restorer function.

Because we don't get to remap the vectors in !MMU kuser_helpers are not in a
defined location, and hence aren't usable. This means we don't need to worry
about keeping them accessible from PL0

Signed-off-by: Jonathan Austin
Reviewed-by: Will Deacon
CC: Nicolas Pitre
CC: Catalin Marinas
---
 arch/arm/kernel/signal.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/signal.c')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..1c16c35c271a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
 		idx += 3;
 
+	/*
+	 * Put the sigreturn code on the stack no matter which return
+	 * mechanism we use in order to remain ABI compliant
+	 */
 	if (__put_user(sigreturn_codes[idx], rc) ||
 	    __put_user(sigreturn_codes[idx+1], rc+1))
 		return 1;
 
-	if (cpsr & MODE32_BIT) {
+	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
 		/*
 		 * 32-bit code can use the new high-page
-		 * signal return code support.
+		 * signal return code support except when the MPU has
+		 * protected the vectors page from PL0
 		 */
 		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
 	} else {
--
cgit v1.2.3
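To make the failure mode above concrete, here is a hypothetical userspace fragment (illustration only, not part of the patch): it branches through a function pointer holding a small near-NULL value, the kind of address that the svc placed at 0x0 does not catch. On a !MMU kernel without this patch such a call could run the exception vectors in user mode and end in a 'Bad Mode' panic; with the vectors page covered by an MPU region the process simply gets SIGSEGV.

/* Hypothetical illustration, not from the patch. */
int main(void)
{
	/*
	 * A function pointer holding a small non-zero address, e.g. picked
	 * up from a member of a NULL struct. The svc at 0x0 only catches
	 * calls through an exactly-NULL pointer, so this one is missed.
	 */
	void (*fn)(void) = (void (*)(void))0x8;

	/*
	 * On a !MMU kernel before this patch, the branch lands in the
	 * exception vectors and runs them in user mode ('Bad Mode' panic);
	 * with the vectors page behind an MPU region the task gets SIGSEGV.
	 */
	fn();
	return 0;
}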
From 48be69a026b2c17350a5ef18a1959a919f60be7d Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 24 Jul 2013 00:29:18 +0100
Subject: ARM: move signal handlers into a vdso-like page

Move the signal handlers into a VDSO page rather than keeping them in the
vectors page. This allows us to place them randomly within this page, and
also map the page at a random location within userspace further protecting
these code fragments from ROP attacks. The new VDSO page is also poisoned
in the same way as the vector page.

Signed-off-by: Russell King
---
 arch/arm/kernel/signal.c | 52 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 7 deletions(-)

(limited to 'arch/arm/kernel/signal.c')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..0f17e06d51e6 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include
+#include
 #include
 #include
 #include
@@ -15,12 +16,11 @@
 #include
 
 #include
+#include
 #include
 #include
 #include
 
-#include "signal.h"
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
@@ -40,11 +40,13 @@
 #define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
 
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
 	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
 	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+static unsigned long signal_return_offset;
+
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -401,12 +403,15 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 		return 1;
 
 	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+		struct mm_struct *mm = current->mm;
+
 		/*
-		 * 32-bit code can use the new high-page
-		 * signal return code support except when the MPU has
-		 * protected the vectors page from PL0
+		 * 32-bit code can use the signal return page
+		 * except when the MPU has protected the vectors
+		 * page from PL0
 		 */
-		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+		retcode = mm->context.sigpage + signal_return_offset +
+			  (idx << 2) + thumb;
 	} else {
 		/*
 		 * Ensure that the instruction cache sees
@@ -608,3 +613,36 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 	} while (thread_flags & _TIF_WORK_MASK);
 	return 0;
 }
+
+static struct page *signal_page;
+
+struct page *get_signal_page(void)
+{
+	if (!signal_page) {
+		unsigned long ptr;
+		unsigned offset;
+		void *addr;
+
+		signal_page = alloc_pages(GFP_KERNEL, 0);
+
+		if (!signal_page)
+			return NULL;
+
+		addr = page_address(signal_page);
+
+		/* Give the signal return code some randomness */
+		offset = 0x200 + (get_random_int() & 0x7fc);
+		signal_return_offset = offset;
+
+		/*
+		 * Copy signal return handlers into the vector page, and
+		 * set sigreturn to be a pointer to these.
+		 */
+		memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+		ptr = (unsigned long)addr + offset;
+		flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+	}
+
+	return signal_page;
+}
--
cgit v1.2.3
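The interesting detail in get_signal_page() above is the offset calculation. The stand-alone sketch below (written for this note, not kernel code; rand() stands in for get_random_int(), and the seven-word size of sigreturn_codes[] on 32-bit ARM is an assumption taken from the array definition in the patch) checks the properties the patch relies on: the offset is always word aligned, never lower than 0x200, and always leaves the sigreturn trampoline inside a single 4KiB page.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed sizeof(sigreturn_codes): seven 32-bit words on 32-bit ARM. */
	const size_t code_size = 7 * sizeof(uint32_t);

	for (int i = 0; i < 1000000; i++) {
		uint32_t rnd = (uint32_t)rand();		/* stand-in for get_random_int() */
		uint32_t offset = 0x200 + (rnd & 0x7fc);	/* formula from the patch */

		assert((offset & 3) == 0);			/* word aligned */
		assert(offset >= 0x200 && offset <= 0x9fc);	/* bounded by the mask */
		assert(offset + code_size <= 4096);		/* trampoline fits in one page */
	}
	return 0;
}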
From e0d407564b532d978b03ceccebd224a05d02f111 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 3 Aug 2013 10:30:05 +0100
Subject: ARM: fix a cockup in 48be69a02 (ARM: move signal handlers into a vdso-like page)

Unfortunately, I never committed the fix to a nasty oops which can occur
as a result of that commit:

------------[ cut here ]------------
kernel BUG at /home/olof/work/batch/include/linux/mm.h:414!
Internal error: Oops - BUG: 0 [#1] PREEMPT SMP ARM
Modules linked in:
CPU: 0 PID: 490 Comm: killall5 Not tainted 3.11.0-rc3-00288-gabe0308 #53
task: e90acac0 ti: e9be8000 task.ti: e9be8000
PC is at special_mapping_fault+0xa4/0xc4
LR is at __do_fault+0x68/0x48c

This doesn't show up unless you do quite a bit of testing; a simple boot
test does not do this, so all my nightly tests were passing fine.

The reason for this is that install_special_mapping() expects the page
array to stick around, and as this was only inserting one page which was
stored on the kernel stack, that's why this was blowing up.

Reported-by: Olof Johansson
Tested-by: Olof Johansson
Signed-off-by: Russell King
---
 arch/arm/kernel/signal.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

(limited to 'arch/arm/kernel/signal.c')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 0f17e06d51e6..39e7105a9b70 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -614,35 +614,32 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 	return 0;
 }
 
-static struct page *signal_page;
-
 struct page *get_signal_page(void)
 {
-	if (!signal_page) {
-		unsigned long ptr;
-		unsigned offset;
-		void *addr;
+	unsigned long ptr;
+	unsigned offset;
+	struct page *page;
+	void *addr;
 
-		signal_page = alloc_pages(GFP_KERNEL, 0);
+	page = alloc_pages(GFP_KERNEL, 0);
 
-		if (!signal_page)
-			return NULL;
+	if (!page)
+		return NULL;
 
-		addr = page_address(signal_page);
+	addr = page_address(page);
 
-		/* Give the signal return code some randomness */
-		offset = 0x200 + (get_random_int() & 0x7fc);
-		signal_return_offset = offset;
+	/* Give the signal return code some randomness */
+	offset = 0x200 + (get_random_int() & 0x7fc);
+	signal_return_offset = offset;
 
-		/*
-		 * Copy signal return handlers into the vector page, and
-		 * set sigreturn to be a pointer to these.
-		 */
-		memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+	/*
+	 * Copy signal return handlers into the vector page, and
+	 * set sigreturn to be a pointer to these.
+	 */
+	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
 
-		ptr = (unsigned long)addr + offset;
-		flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
-	}
+	ptr = (unsigned long)addr + offset;
+	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
 
-	return signal_page;
+	return page;
 }
--
cgit v1.2.3

From 8c0cc8a5d90bc7373a7a9e7f7a40eb41f51e03fc Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 3 Aug 2013 10:39:51 +0100
Subject: ARM: fix nommu builds with 48be69a02 (ARM: move signal handlers into a vdso-like page)

Olof reports that noMMU builds error out with:

arch/arm/kernel/signal.c: In function 'setup_return':
arch/arm/kernel/signal.c:413:25: error: 'mm_context_t' has no member named 'sigpage'

This shows one of the evilnesses of IS_ENABLED(). Get rid of it here and
replace it with #ifdef's - and as no noMMU platform can make use of sigpage,
depend on CONFIG_MMU not CONFIG_ARM_MPU.

Reported-by: Olof Johansson
Signed-off-by: Russell King
---
 arch/arm/kernel/signal.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/signal.c')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 39e7105a9b70..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -402,7 +402,8 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	    __put_user(sigreturn_codes[idx+1], rc+1))
 		return 1;
 
-	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+	if (cpsr & MODE32_BIT) {
 		struct mm_struct *mm = current->mm;
 
 		/*
@@ -412,7 +413,9 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 		 */
 		retcode = mm->context.sigpage + signal_return_offset +
 			  (idx << 2) + thumb;
-	} else {
+	} else
+#endif
+	{
 		/*
 		 * Ensure that the instruction cache sees
 		 * the return code written onto the stack.
--
cgit v1.2.3
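For readers unfamiliar with the pitfall the last patch works around, the sketch below is a minimal stand-alone illustration (ordinary C; the IS_ENABLED() macro is a simplified stand-in for the kernel's version, and the struct and option names are made up for the example). IS_ENABLED() keeps the disabled branch visible to the compiler, so every identifier in it must exist in every configuration; an #ifdef removes the code before it is parsed, which is why only the #ifdef form builds when a member such as sigpage exists solely under CONFIG_MMU.

#include <stdio.h>

#define CONFIG_HAS_SIGPAGE 0		/* flip to 1 to model an MMU build */
#define IS_ENABLED(option) (option)	/* simplified stand-in for the kernel macro */

struct ctx {
	unsigned long stack_retcode;
#if CONFIG_HAS_SIGPAGE
	unsigned long sigpage;		/* member exists only in some configurations */
#endif
};

static unsigned long pick_retcode(const struct ctx *ctx)
{
#if CONFIG_HAS_SIGPAGE
	/*
	 * Inside the #ifdef the reference to ctx->sigpage disappears entirely
	 * when the option is off, so it can never break the other build.
	 */
	if (IS_ENABLED(CONFIG_HAS_SIGPAGE))
		return ctx->sigpage;
#endif
	/*
	 * Writing the IS_ENABLED() test here without the #ifdef would still
	 * compile the dead branch and fail with "has no member named
	 * 'sigpage'", just like the noMMU error quoted above.
	 */
	return ctx->stack_retcode;
}

int main(void)
{
	struct ctx ctx = { .stack_retcode = 0x1234 };

	printf("retcode = %#lx\n", pick_retcode(&ctx));
	return 0;
}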