From 086e47b6c959ee557e9adefe72b8800c62d0ac34 Mon Sep 17 00:00:00 2001
From: Sachin Kamat
Date: Sat, 13 Oct 2012 09:25:58 +0100
Subject: arm64: Remove duplicate inclusion of mmu_context.h in smp.c

asm/mmu_context.h was included twice.

Signed-off-by: Sachin Kamat
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/smp.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b711525be21f..226b6bf6e9c2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -46,7 +46,6 @@
 #include
 #include
 #include
-#include <asm/mmu_context.h>

 /*
  * as from 2.5, kernels no longer have an init_tasks structure
-- cgit v1.2.3

From fea2acaa5ca01eb2996226b2ae177aa66b222511 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 16 Oct 2012 11:26:57 +0100
Subject: arm64: Select MODULES_USE_ELF_RELA

With commit 786d35d4 (make most arch asm/module.h files use
asm-generic/module.h) arm64 needs to enable MODULES_USE_ELF_RELA for
loadable modules.

Signed-off-by: Catalin Marinas
Acked-by: Will Deacon
---
 arch/arm64/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7ff68c946073..ef54a59a9e89 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -22,6 +22,7 @@ config ARM64
        select HAVE_PERF_EVENTS
        select HAVE_SPARSE_IRQ
        select IRQ_DOMAIN
+       select MODULES_USE_ELF_RELA
        select NO_BOOTMEM
        select OF
        select OF_EARLY_FLATTREE
-- cgit v1.2.3
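A note on the MODULES_USE_ELF_RELA selection above: AArch64 module relocations
are RELA-format, meaning each relocation record carries an explicit addend
rather than borrowing the bytes stored at the target location as REL does.
The following is a minimal userspace sketch of applying one such relocation;
it is illustrative only, not the kernel's module loader, and apply_abs64 is a
made-up name:

#include <elf.h>
#include <stdint.h>

/*
 * Illustrative only: apply an R_AARCH64_ABS64 relocation from a RELA
 * entry. With RELA, the addend A lives in the relocation record itself;
 * with REL it would have to be read back from the target location.
 */
static void apply_abs64(uint8_t *base, const Elf64_Rela *rela,
                        uint64_t sym_value)
{
        uint64_t *where = (uint64_t *)(base + rela->r_offset);

        *where = sym_value + rela->r_addend;    /* S + A */
}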
From c60b0c2817bd6a990b08a7651e9cf630414665f5 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 16 Oct 2012 11:44:53 +0100
Subject: arm64: Fix the update_vsyscall() prototype

With commit 576094b7 (time: Introduce new GENERIC_TIME_VSYSCALL) the
old update_vsyscall() prototype is no longer available. This patch
updates the arm64 port.

Signed-off-by: Catalin Marinas
Acked-by: Will Deacon
Cc: John Stultz
---
 arch/arm64/kernel/vdso.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 17948fc7d663..ba457943a16b 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <linux/timekeeper_internal.h>
 #include
 #include

@@ -222,11 +223,10 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 /*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
-void update_vsyscall(struct timespec *ts, struct timespec *wtm,
-                     struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
        struct timespec xtime_coarse;
-       u32 use_syscall = strcmp(clock->name, "arch_sys_counter");
+       u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");

        ++vdso_data->tb_seq_count;
        smp_wmb();
@@ -237,13 +237,13 @@ void update_vsyscall(struct timespec *ts, struct timespec *wtm,
        vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;

        if (!use_syscall) {
-               vdso_data->cs_cycle_last        = clock->cycle_last;
-               vdso_data->xtime_clock_sec      = ts->tv_sec;
-               vdso_data->xtime_clock_nsec     = ts->tv_nsec;
-               vdso_data->cs_mult              = mult;
-               vdso_data->cs_shift             = clock->shift;
-               vdso_data->wtm_clock_sec        = wtm->tv_sec;
-               vdso_data->wtm_clock_nsec       = wtm->tv_nsec;
+               vdso_data->cs_cycle_last        = tk->clock->cycle_last;
+               vdso_data->xtime_clock_sec      = tk->xtime_sec;
+               vdso_data->xtime_clock_nsec     = tk->xtime_nsec >> tk->shift;
+               vdso_data->cs_mult              = tk->mult;
+               vdso_data->cs_shift             = tk->shift;
+               vdso_data->wtm_clock_sec        = tk->wall_to_monotonic.tv_sec;
+               vdso_data->wtm_clock_nsec       = tk->wall_to_monotonic.tv_nsec;
        }

        smp_wmb();
-- cgit v1.2.3

From f71a1a42667f576ec736bb1200eba2118fee3a22 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 16 Oct 2012 12:00:29 +0100
Subject: arm64: Ignore memory blocks below PHYS_OFFSET

According to Documentation/arm64/booting.txt, the kernel image must be
loaded at a pre-defined offset from the start of RAM so that the kernel
can calculate PHYS_OFFSET based on this address. If the DT contains
memory blocks below this PHYS_OFFSET, report them and ignore the
corresponding memory range.

Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/setup.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 48ffb9fb3fe3..7665a9bfdb1e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -170,7 +170,19 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)

 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
+       base &= PAGE_MASK;
        size &= PAGE_MASK;
+       if (base + size < PHYS_OFFSET) {
+               pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+                          base, base + size);
+               return;
+       }
+       if (base < PHYS_OFFSET) {
+               pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+                          base, PHYS_OFFSET);
+               size -= PHYS_OFFSET - base;
+               base = PHYS_OFFSET;
+       }
        memblock_add(base, size);
 }
-- cgit v1.2.3
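To make the clamping logic above concrete, here is a small userspace model of
the same two cases. The PHYS_OFFSET and PAGE_MASK values are stand-ins chosen
for illustration; the kernel derives the real PHYS_OFFSET from the image load
address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK       (~0xfffULL)             /* 4K pages assumed */
#define PHYS_OFFSET     0x80000000ULL           /* example value only */

/* Userspace model of the checks added to early_init_dt_add_memory_arch() */
static void add_memory(uint64_t base, uint64_t size)
{
        base &= PAGE_MASK;
        size &= PAGE_MASK;
        if (base + size < PHYS_OFFSET) {
                printf("ignored block 0x%" PRIx64 " - 0x%" PRIx64 "\n",
                       base, base + size);
                return;
        }
        if (base < PHYS_OFFSET) {
                size -= PHYS_OFFSET - base;     /* trim the low part */
                base = PHYS_OFFSET;
        }
        printf("added 0x%" PRIx64 ", size 0x%" PRIx64 "\n", base, size);
}

int main(void)
{
        add_memory(0x40000000, 0x10000000);     /* wholly below: dropped  */
        add_memory(0x70000000, 0x20000000);     /* straddles: trimmed     */
        return 0;
}

The second call shows the trim case: the block straddling PHYS_OFFSET loses
its low 256MB and is registered from PHYS_OFFSET upwards.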
From 16dd46bb781a1d37eeb2377e8e48276e9d14d15d Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 16 Oct 2012 17:07:46 +0100
Subject: arm64: No need to set the x0-x2 registers in start_thread()

For historical reasons, ARM used to set r0-r2 in start_thread() to the
first values on the user stack when starting a new user application.
The same logic has been inherited in AArch64. The x0 register is
overridden by the sys_execve() return value so it's always zero on
success. The x1 and x2 registers are ignored by AArch64 and EABI
AArch32 applications, so we can safely remove the register setting for
both native and compat user space. This also fixes a potential fault
with the kernel accessing the user space stack directly.

Signed-off-by: Catalin Marinas
Reported-by: Al Viro
---
 arch/arm64/include/asm/processor.h | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 39a208a392f7..5d810044feda 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -92,30 +92,20 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)

 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
 {
-       unsigned long *stack = (unsigned long *)sp;
-
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;
        regs->sp = sp;
-       regs->regs[2] = stack[2];       /* x2 (envp) */
-       regs->regs[1] = stack[1];       /* x1 (argv) */
-       regs->regs[0] = stack[0];       /* x0 (argc) */
 }

 #ifdef CONFIG_COMPAT
 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
 {
-       unsigned int *stack = (unsigned int *)sp;
-
        start_thread_common(regs, pc);
        regs->pstate = COMPAT_PSR_MODE_USR;
        if (pc & 1)
                regs->pstate |= COMPAT_PSR_T_BIT;
        regs->compat_sp = sp;
-       regs->regs[2] = stack[2];       /* x2 (envp) */
-       regs->regs[1] = stack[1];       /* x1 (argv) */
-       regs->regs[0] = stack[0];       /* x0 (argc) */
 }
 #endif
-- cgit v1.2.3

From 7797d17c591ae62c6f43c6de4fdb8beeb50eb692 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 11 Oct 2012 12:10:57 +0100
Subject: arm64: ptrace: make structure padding explicit for debug registers

The user_hwdebug_state structure contains implicit padding to conform
to the alignment requirements of the AArch64 ABI (namely that
aggregates must be aligned to their most aligned member). This patch
fixes the ptrace functions operating on struct user_hwdebug_state so
that the padding is handled correctly.

Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/uapi/asm/ptrace.h |  3 ++-
 arch/arm64/kernel/ptrace.c           | 36 ++++++++++++++++++++++++++++++------
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 9b131b4efa0b..6913643bbe54 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -79,13 +79,14 @@ struct user_fpsimd_state {

 struct user_hwdebug_state {
        __u32           dbg_info;
+       __u32           pad;
        struct {
                __u64   addr;
                __u32   ctrl;
+               __u32   pad;
        } dbg_regs[16];
 };

-
 #endif /* __ASSEMBLY__ */

 #endif /* _UAPI__ASM_PTRACE_H */

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 2ea3968367c2..c62d39d5c99f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -372,7 +372,7 @@ static int ptrace_hbp_set_addr(unsigned int note_type,

 #define PTRACE_HBP_ADDR_SZ     sizeof(u64)
 #define PTRACE_HBP_CTRL_SZ     sizeof(u32)
-#define PTRACE_HBP_REG_OFF     sizeof(u32)
+#define PTRACE_HBP_PAD_SZ      sizeof(u32)

 static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
@@ -380,7 +380,7 @@ static int hw_break_get(struct task_struct *target,
                        void *kbuf, void __user *ubuf)
 {
        unsigned int note_type = regset->core_note_type;
-       int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
+       int ret, idx = 0, offset, limit;
        u32 info, ctrl;
        u64 addr;

@@ -389,11 +389,20 @@ static int hw_break_get(struct task_struct *target,
        if (ret)
                return ret;

-       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4);
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
+                                 sizeof(info));
+       if (ret)
+               return ret;
+
+       /* Pad */
+       offset = offsetof(struct user_hwdebug_state, pad);
+       ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
+                                      offset + PTRACE_HBP_PAD_SZ);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
+       offset = offsetof(struct user_hwdebug_state, dbg_regs);
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
@@ -413,6 +422,13 @@ static int hw_break_get(struct task_struct *target,
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;
+
+               ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                              offset,
+                                              offset + PTRACE_HBP_PAD_SZ);
+               if (ret)
+                       return ret;
+               offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

@@ -425,12 +441,13 @@ static int hw_break_set(struct task_struct *target,
                        const void *kbuf, const void __user *ubuf)
 {
        unsigned int note_type = regset->core_note_type;
-       int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
+       int ret, idx = 0, offset, limit;
        u32 ctrl;
        u64 addr;

-       /* Resource info */
-       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
+       /* Resource info and pad */
+       offset = offsetof(struct user_hwdebug_state, dbg_regs);
+       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
        if (ret)
                return ret;

@@ -454,6 +471,13 @@ static int hw_break_set(struct task_struct *target,
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;
+
+               ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                               offset,
+                                               offset + PTRACE_HBP_PAD_SZ);
+               if (ret)
+                       return ret;
+               offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }
-- cgit v1.2.3
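The padding described in the message above is easy to see from userspace: the
__u64 addr member forces 8-byte alignment of the aggregate, so four bytes of
padding follow dbg_info and four more follow each ctrl. A quick check,
assuming a 64-bit AArch64 (LP64) target; the struct below is a local mirror
written for illustration, not the UAPI header itself:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Userspace mirror of struct user_hwdebug_state, as patched above. */
struct user_hwdebug_state {
        uint32_t dbg_info;
        uint32_t pad;                   /* previously implicit */
        struct {
                uint64_t addr;
                uint32_t ctrl;
                uint32_t pad;           /* previously implicit tail padding */
        } dbg_regs[16];
};

int main(void)
{
        /* The explicit pads only name bytes the ABI inserted anyway:
         * the u64 member aligns each array element to 8 bytes. */
        static_assert(offsetof(struct user_hwdebug_state, dbg_regs) == 8, "");
        static_assert(sizeof((struct user_hwdebug_state){0}.dbg_regs[0]) == 16, "");
        return 0;
}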
From 8f34a1da35aed7b438a2de8ac27723a5472e8399 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 18 Oct 2012 15:17:00 +0100
Subject: arm64: ptrace: use HW_BREAKPOINT_EMPTY type for disabled breakpoints

If a debugger tries to zero a hardware debug control register, the
kernel will try to infer both the type and length of the breakpoint in
order to sanity-check against the requested regset type. This will fail
because the encoding will appear as a zero-length breakpoint.

This patch changes the control register setting so that disabled
breakpoints are treated as HW_BREAKPOINT_EMPTY and no further
sanity-checking is required.

Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index c62d39d5c99f..6e1e77f1831c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -234,28 +234,33 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
                                     struct arch_hw_breakpoint_ctrl ctrl,
                                     struct perf_event_attr *attr)
 {
-       int err, len, type;
+       int err, len, type, disabled = !ctrl.enabled;

-       err = arch_bp_generic_fields(ctrl, &len, &type);
-       if (err)
-               return err;
-
-       switch (note_type) {
-       case NT_ARM_HW_BREAK:
-               if ((type & HW_BREAKPOINT_X) != type)
-                       return -EINVAL;
-               break;
-       case NT_ARM_HW_WATCH:
-               if ((type & HW_BREAKPOINT_RW) != type)
+       if (disabled) {
+               len = 0;
+               type = HW_BREAKPOINT_EMPTY;
+       } else {
+               err = arch_bp_generic_fields(ctrl, &len, &type);
+               if (err)
+                       return err;
+
+               switch (note_type) {
+               case NT_ARM_HW_BREAK:
+                       if ((type & HW_BREAKPOINT_X) != type)
+                               return -EINVAL;
+                       break;
+               case NT_ARM_HW_WATCH:
+                       if ((type & HW_BREAKPOINT_RW) != type)
+                               return -EINVAL;
+                       break;
+               default:
                        return -EINVAL;
-               break;
-       default:
-               return -EINVAL;
+               }
        }

        attr->bp_len    = len;
        attr->bp_type   = type;
-       attr->disabled  = !ctrl.enabled;
+       attr->disabled  = disabled;

        return 0;
 }
-- cgit v1.2.3
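For reference, the debugger-side operation that used to trip the check is a
PTRACE_SETREGSET write of a zeroed control value. A hedged sketch of such a
tracer follows; clear_hw_break_slot0 is a hypothetical helper, error handling
and attach/stop logic are omitted, and only one of the 16 slots is written:

#include <elf.h>
#include <stdint.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_ARM_HW_BREAK
#define NT_ARM_HW_BREAK 0x402
#endif

/*
 * Hypothetical tracer-side helper: disable hardware breakpoint slot 0
 * of a stopped tracee by writing an all-zero addr/ctrl pair. A zero
 * ctrl is exactly the case the patch above now maps to
 * HW_BREAKPOINT_EMPTY instead of rejecting as a zero-length breakpoint.
 */
static long clear_hw_break_slot0(pid_t pid)
{
        struct {
                uint32_t dbg_info;
                uint32_t pad;
                struct { uint64_t addr; uint32_t ctrl; uint32_t pad; } regs[1];
        } state;
        struct iovec iov = { .iov_base = &state, .iov_len = sizeof(state) };

        memset(&state, 0, sizeof(state));       /* ctrl == 0: disabled */
        return ptrace(PTRACE_SETREGSET, pid, (void *)NT_ARM_HW_BREAK, &iov);
}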
From aeed41a9371ee02257b608eb06a9058507a7d0f4 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 19 Oct 2012 17:33:27 +0100
Subject: arm64: fix alignment padding in assembly code

An interesting effect of using the generic version of linkage.h
is that the padding is defined in terms of x86 NOPs, which can have
even more interesting effects when the assembly code looks like this:

ENTRY(func1)
        mov     x0, xzr
ENDPROC(func1)

// fall through
ENTRY(func2)
        mov     x0, #1
        ret
ENDPROC(func2)

Admittedly, the code is not very nice. But having code from another
architecture doesn't look completely sane either.

The fix is to add arm64's version of linkage.h, which causes the
insertion of proper AArch64 NOPs.

Signed-off-by: Marc Zyngier
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/Kbuild    | 1 -
 arch/arm64/include/asm/linkage.h | 7 +++++++
 2 files changed, 7 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/include/asm/linkage.h

diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index fe77e51a7847..a581a2205938 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -18,7 +18,6 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
-generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mman.h

diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
new file mode 100644
index 000000000000..636c1bced7d4
--- /dev/null
+++ b/arch/arm64/include/asm/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN         .align 4
+#define __ALIGN_STR     ".align 4"
+
+#endif
-- cgit v1.2.3
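To illustrate what the new __ALIGN buys: the generic fallback definition has
historically been ".align 4,0x90", filling inter-function gaps with the x86
NOP byte, whereas on AArch64 ".align 4" is a power-of-two alignment (16 bytes)
and the assembler pads text sections with real A64 NOP instructions. A small
illustrative checker, assuming the 0xd503201f NOP encoding; padding_is_a64_nops
is a made-up name:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define AARCH64_NOP     0xd503201fU     /* encoding of the A64 "nop" */

/*
 * Illustrative check: padding emitted by ".align 4" between two AArch64
 * functions should decode as NOPs, so falling through it is harmless.
 * A gap filled with 0x90 bytes is not a valid A64 instruction stream.
 */
static int padding_is_a64_nops(const uint32_t *gap, size_t words)
{
        for (size_t i = 0; i < words; i++)
                if (gap[i] != AARCH64_NOP)
                        return 0;
        return 1;
}

int main(void)
{
        const uint32_t good[] = { AARCH64_NOP, AARCH64_NOP };
        const uint32_t bad[]  = { 0x90909090U, 0x90909090U };

        printf("%d %d\n", padding_is_a64_nops(good, 2),
               padding_is_a64_nops(bad, 2));
        return 0;
}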