From 30650239adc9e4e9439256d6988e521518dccbb3 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Mon, 29 Apr 2013 18:07:47 +0000 Subject: powerpc: Add an in memory udbg console This patch adds a new udbg early debug console which utilises statically defined input and output buffers stored within the kernel BSS. It is primarily designed to assist with bring up of new hardware which may not have a working console but which has a method of reading/writing kernel memory. This version incorporates comments made by Ben H (thanks!). Changes from v1: - Add memory barriers. - Ensure updating of read/write positions is atomic. Signed-off-by: Alistair Popple Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/udbg.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 5a7510e9d09d..dc590919f8eb 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void); extern void __init udbg_init_cpm(void); extern void __init udbg_init_usbgecko(void); extern void __init udbg_init_wsp(void); +extern void __init udbg_init_memcons(void); extern void __init udbg_init_ehv_bc(void); extern void __init udbg_init_ps3gelic(void); extern void __init udbg_init_debug_opal_raw(void); -- cgit v1.2.3 From 73ed148aea9dc0508be7e30e7a447f55c1b2f378 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 10 May 2013 16:59:18 +1000 Subject: powerpc/powernv: Improve kexec reliability We add a machine_shutdown hook that frees the OPAL interrupts (so they get masked at the source and don't fire while kexec'ing) and which triggers an IODA reset on all the PCIe host bridges which will have the effect of blocking all DMAs and subsequent PCIs interrupts. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/opal.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index b6c8b58b1d76..b2906adb89d3 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -563,6 +563,8 @@ extern void opal_nvram_init(void); extern int opal_machine_check(struct pt_regs *regs); +extern void opal_shutdown(void); + #endif /* __ASSEMBLY__ */ #endif /* __OPAL_H */ -- cgit v1.2.3 From 4a3b8d0b833707811011ad14dd12dabf31fa690a Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Thu, 9 May 2013 22:09:41 -0500 Subject: powerpc: hard_irq_disable(): Call trace_hardirqs_off after disabling lockdep.c has this: /* * So we're supposed to get called after you mask local IRQs, * but for some reason the hardware doesn't quite think you did * a proper job. */ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return; Since irqs_disabled() is based on soft_enabled(), that (not just the hard EE bit) needs to be 0 before we call trace_hardirqs_off. 
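To make that ordering concrete, here is a small stand-alone C model of the fixed sequence. The soft_enabled flag, the lockdep check and the hard-disable step are all simulated here; this is not the kernel code, which follows in the diff below.

/* Userspace model of the hard_irq_disable() ordering fix: the soft-enable
 * state must be reconciled before the lockdep hook runs, because the hook
 * re-checks irqs_disabled(), which on powerpc is derived from that state.
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned char soft_enabled = 1;		/* stands in for paca->soft_enabled */

static bool irqs_disabled(void)			/* stands in for arch_irqs_disabled() */
{
	return soft_enabled == 0;
}

static void trace_hardirqs_off(void)		/* stands in for the lockdep hook */
{
	if (!irqs_disabled())
		printf("WARN: trace_hardirqs_off() while still soft-enabled\n");
}

static void hard_irq_disable_model(void)
{
	unsigned char was_enabled = soft_enabled;

	/* __hard_irq_disable() would clear MSR[EE] here */
	soft_enabled = 0;			/* reconcile the soft state first */
	if (was_enabled)
		trace_hardirqs_off();		/* now irqs_disabled() is true */
}

int main(void)
{
	hard_irq_disable_model();		/* no warning with the new ordering */
	return 0;
}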
Signed-off-by: Scott Wood --- arch/powerpc/include/asm/hw_irq.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index d615b28dda82..ba713f166fa5 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void) #endif #define hard_irq_disable() do { \ + u8 _was_enabled = get_paca()->soft_enabled; \ __hard_irq_disable(); \ - if (local_paca->soft_enabled) \ - trace_hardirqs_off(); \ get_paca()->soft_enabled = 0; \ get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ + if (_was_enabled) \ + trace_hardirqs_off(); \ } while(0) static inline bool lazy_irq_pending(void) -- cgit v1.2.3 From 613e60a66181acc5c8b63268e939622162da2393 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sat, 11 May 2013 22:33:19 +0000 Subject: powerpc/mm: Use the correct mask value when looking at pgtable address Our pgtable are 2*sizeof(pte_t)*PTRS_PER_PTE which is PTE_FRAG_SIZE. Instead of depending on frag size, mask with PMD_MASKED_BITS. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pgalloc-64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 91acb12bac92..b66ae722a8e9 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, static inline pgtable_t pmd_pgtable(pmd_t pmd) { - return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); + return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, -- cgit v1.2.3 From 83d5e64b7efa7f39b10ff5e92792e807a720289c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 6 May 2013 10:51:00 +0000 Subject: powerpc: Fix build errors STRICT_MM_TYPECHECKS Signed-off-by: Aneesh Kumar K.V Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pte-hash64-64k.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index 3e13e23e4fdf..d836d945068d 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -47,7 +47,7 @@ * generic accessors and iterators here */ #define __real_pte(e,p) ((real_pte_t) { \ - (e), ((e) & _PAGE_COMBO) ? \ + (e), (pte_val(e) & _PAGE_COMBO) ? \ (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) -- cgit v1.2.3 From 120496ac2d2d60aee68d3123a68169502a85f4b5 Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Tue, 7 May 2013 04:34:11 +0000 Subject: powerpc: Bring all threads online prior to migration/hibernation This patch brings online all threads which are present but not online prior to migration/hibernation. After migration/hibernation those threads are taken back offline. During migration/hibernation all online CPUs must call H_JOIN, this is required by the hypervisor. Without this patch, threads that are offline (H_CEDE'd) will not be woken to make the H_JOIN call and the OS will be deadlocked (all threads either JOIN'd or CEDE'd). 
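As an illustration of the sequence this enables, here is a rough userspace model of onlining the missing threads, performing the join, and then restoring the previous state. The arrays and helper below are invented for the illustration; the real code uses cpumasks together with the rtas_online_cpus_mask()/rtas_offline_cpus_mask() helpers declared in the diff that follows.

/* Model of "bring present-but-offline threads online around the H_JOIN". */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

static bool cpu_online[NR_CPUS] = { true, false, true, false,
				    true, true,  false, true };

static void join_all_online_cpus(void)
{
	/* models the H_JOIN rendezvous: every online thread must participate */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu])
			printf("cpu %d: H_JOIN\n", cpu);
}

int main(void)
{
	bool was_offline[NR_CPUS] = { false };

	/* 1. online the offline threads, remembering which ones we touched */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (!cpu_online[cpu]) {
			was_offline[cpu] = true;
			cpu_online[cpu] = true;
		}

	/* 2. perform the migration/hibernation join */
	join_all_online_cpus();

	/* 3. take the temporarily-onlined threads back offline */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (was_offline[cpu])
			cpu_online[cpu] = false;

	return 0;
}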
Cc: Signed-off-by: Robert Jennings Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/rtas.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index a8bc2bb4adc9..34fd70488d83 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex); extern void rtas_initialize(void); extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); +extern int rtas_online_cpus_mask(cpumask_var_t cpus); +extern int rtas_offline_cpus_mask(cpumask_var_t cpus); extern int rtas_ibm_suspend_me(struct rtas_args *); struct rtc_time; -- cgit v1.2.3 From 75b93da43aa1132bf23dafbb4badb028ccf78129 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 14 May 2013 15:10:02 +1000 Subject: powerpc/powernv: Detect OPAL v3 API version Future firmwares will support that new version Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/firmware.h | 4 +++- arch/powerpc/include/asm/opal.h | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 0df54646f968..681bc0314b6b 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -52,6 +52,7 @@ #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) +#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) #ifndef __ASSEMBLY__ @@ -69,7 +70,8 @@ enum { FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, FW_FEATURE_PSERIES_ALWAYS = 0, - FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, + FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | + FW_FEATURE_OPALv3, FW_FEATURE_POWERNV_ALWAYS = 0, FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index b2906adb89d3..cbb9305ab15a 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType { enum OpalThreadStatus { OPAL_THREAD_INACTIVE = 0x0, - OPAL_THREAD_STARTED = 0x1 + OPAL_THREAD_STARTED = 0x1, + OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */ }; enum OpalPciBusCompare { -- cgit v1.2.3 From 22ecbe8dcef4f6bf80ed4358600e8f6e03105c5c Mon Sep 17 00:00:00 2001 From: Li Zhong Date: Mon, 13 May 2013 16:16:40 +0000 Subject: powerpc: Syscall hooks for context tracking subsystem This is the syscall slow path hooks for context tracking subsystem, corresponding to [PATCH] x86: Syscall hooks for userspace RCU extended QS commit bf5a3c13b939813d28ce26c01425054c740d6731 TIF_MEMDIE is moved to the second 16-bits (with value 17), as it seems there is no asm code using it. TIF_NOHZ is added to _TIF_SYCALL_T_OR_A, so it is better for it to be in the same 16 bits with others in the group, so in the asm code, andi. with this group could work. 
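The bit-group constraint can be sanity-checked with a few lines of C. The group below is an abbreviated stand-in for _TIF_SYSCALL_T_OR_A (its exact membership is not reproduced here), and the bit number for TIF_SYSCALL_TRACE is an assumption; the point is only that everything tested by the single andi. must stay below bit 16, while bits such as TIF_MEMDIE that are only tested from C may move above it.

/* Demonstrates the 16-bit constraint: andi.'s immediate is 16 bits wide. */
#include <stdio.h>

#define TIF_SYSCALL_TRACE	0	/* assumed for this illustration */
#define TIF_SYSCALL_AUDIT	7
#define TIF_NOHZ		9	/* newly added to the group */
#define TIF_SYSCALL_TRACEPOINT	15
#define TIF_MEMDIE		17	/* moved up; only C code tests it */

int main(void)
{
	unsigned int group = (1u << TIF_SYSCALL_TRACE) |
			     (1u << TIF_SYSCALL_AUDIT) |
			     (1u << TIF_NOHZ) |
			     (1u << TIF_SYSCALL_TRACEPOINT);

	printf("group mask 0x%x %s in a 16-bit andi. immediate\n", group,
	       group <= 0xffffu ? "fits" : "does NOT fit");
	printf("TIF_MEMDIE mask 0x%x lives in the upper half, which is fine\n",
	       1u << TIF_MEMDIE);
	return 0;
}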
Signed-off-by: Li Zhong Acked-by: Frederic Weisbecker Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/thread_info.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 8ceea14d6fe4..ba7b1973866e 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ +#define TIF_NOHZ 9 /* in adaptive nohz mode */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? */ +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< Date: Mon, 13 May 2013 16:16:43 +0000 Subject: powerpc: Use the new schedule_user API on userspace preemption This patch corresponds to [PATCH] x86: Use the new schedule_user API on userspace preemption commit 0430499ce9d78691f3985962021b16bf8f8a8048 Signed-off-by: Li Zhong Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/context_tracking.h | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 arch/powerpc/include/asm/context_tracking.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h new file mode 100644 index 000000000000..b6f5a33b8ee2 --- /dev/null +++ b/arch/powerpc/include/asm/context_tracking.h @@ -0,0 +1,10 @@ +#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H +#define _ASM_POWERPC_CONTEXT_TRACKING_H + +#ifdef CONFIG_CONTEXT_TRACKING +#define SCHEDULE_USER bl .schedule_user +#else +#define SCHEDULE_USER bl .schedule +#endif + +#endif -- cgit v1.2.3 From 59affcd3e460b97492bc1aa2b843bafe7c54f596 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 21 May 2013 16:31:12 +0000 Subject: powerpc: Context switch more PMU related SPRs In commit 9353374 "Context switch the new EBB SPRs" we added support for context switching some new EBB SPRs. However despite four of us signing off on that patch we missed some. To be fair these are not actually new SPRs, but they are now potentially user accessible so need to be context switched. 
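The save/restore shape this implies looks roughly like the stand-alone model below. Plain variables stand in for mfspr/mtspr, and the structure fields mirror the ones added in the diff that follows; this is illustrative only, not the kernel's switch code.

/* Minimal model of context-switching per-thread PMU SPR state. */
#include <stdio.h>

struct pmu_regs { unsigned long siar, sdar, sier, mmcr0, mmcr2, mmcra; };

static struct pmu_regs hw;		/* stands in for the real SPRs */

struct thread_struct { struct pmu_regs pmu; };

static void save_sprs(struct thread_struct *prev)
{
	prev->pmu = hw;			/* the kernel would mfspr each SPR */
}

static void restore_sprs(struct thread_struct *next)
{
	hw = next->pmu;			/* the kernel would mtspr each SPR */
}

int main(void)
{
	struct thread_struct a = { .pmu = { .mmcr0 = 0x80000000UL } };
	struct thread_struct b = { 0 };

	restore_sprs(&a);		/* run task a */
	hw.siar = 0x1234;		/* PMU activity while a runs */
	save_sprs(&a);			/* switch away from a */
	restore_sprs(&b);		/* task b must not see a's state */
	printf("task b sees siar=%#lx\n", b.pmu.siar);
	save_sprs(&b);
	restore_sprs(&a);		/* back to a: its values are preserved */
	printf("task a sees siar=%#lx\n", a.pmu.siar);
	return 0;
}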
Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/processor.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index d7e67ca8b4a6..594db6bc093c 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -284,6 +284,12 @@ struct thread_struct { unsigned long ebbrr; unsigned long ebbhr; unsigned long bescr; + unsigned long siar; + unsigned long sdar; + unsigned long sier; + unsigned long mmcr0; + unsigned long mmcr2; + unsigned long mmcra; #endif }; -- cgit v1.2.3 From b72c1f651491e4cd33ddec79c504a49071a512f0 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 21 May 2013 22:58:21 +0000 Subject: powerpc: Make radeon 32-bit MSI quirk work on powernv This moves the quirk itself to pci_64.c as to get built on all ppc64 platforms (the only ones with a pci_dn), factors the two implementations of get_pdn() into a single pci_get_dn() and use the quirk to do 32-bit MSIs on IODA based powernv platforms. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pci-bridge.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 8b11b5bd9938..2c1d8cb9b265 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -174,6 +174,8 @@ struct pci_dn { /* Get the pointer to a device_node's pci_dn */ #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) +extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); + extern void * update_dn_pci_info(struct device_node *dn, void *data); static inline int pci_device_from_OF_node(struct device_node *np, -- cgit v1.2.3 From 35f7097fcedec63fcba1852dbee96f74a2d90878 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Sun, 26 May 2013 18:09:37 +0000 Subject: powerpc/tm: Make room for hypervisor in abort cause codes PAPR carves out 0xff-0xe0 for hypervisor use of transactional memory software abort cause codes. Unfortunately we don't respect this currently. Below fixes this to move our cause codes to below this region. Signed-off-by: Michael Neuling Cc: # 3.9 only Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/reg.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a6136515c7f2..8f6a94b2dc99 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -113,14 +113,15 @@ /* Reason codes describing kernel causes for transaction aborts. By convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if - the failure is persistent. + the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. 
*/ -#define TM_CAUSE_RESCHED 0xfe -#define TM_CAUSE_TLBI 0xfc -#define TM_CAUSE_FAC_UNAV 0xfa -#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */ -#define TM_CAUSE_MISC 0xf6 -#define TM_CAUSE_SIGNAL 0xf4 +#define TM_CAUSE_PERSISTENT 0x01 +#define TM_CAUSE_RESCHED 0xde +#define TM_CAUSE_TLBI 0xdc +#define TM_CAUSE_FAC_UNAV 0xda +#define TM_CAUSE_SYSCALL 0xd8 /* future use */ +#define TM_CAUSE_MISC 0xd6 /* future use */ +#define TM_CAUSE_SIGNAL 0xd4 #if defined(CONFIG_PPC_BOOK3S_64) #define MSR_64BIT MSR_SF -- cgit v1.2.3 From 6ce6c629fd8254b3177650de99699682ff7f6707 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Sun, 26 May 2013 18:09:39 +0000 Subject: powerpc/tm: Abort on emulation and alignment faults If we are emulating an instruction inside an active user transaction that touches memory, the kernel can't emulate it as it operates in transactional suspend context. We need to abort these transactions and send them back to userspace for the hardware to rollback. We can service these if the user transaction is in suspend mode, since the kernel will operate in the same suspend context. This adds a check to all alignment faults and to specific instruction emulations (only string instructions for now). If the user process is in an active (non-suspended) transaction, we abort the transaction go back to userspace allowing the HW to roll back the transaction and tell the user of the failure. This also adds new tm abort cause codes to report the reason of the persistent error to the user. Crappy test case here http://neuling.org/devel/junkcode/aligntm.c Signed-off-by: Michael Neuling Cc: # v3.9 Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/reg.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 8f6a94b2dc99..d0528e0d6db8 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -122,6 +122,8 @@ #define TM_CAUSE_SYSCALL 0xd8 /* future use */ #define TM_CAUSE_MISC 0xd6 /* future use */ #define TM_CAUSE_SIGNAL 0xd4 +#define TM_CAUSE_ALIGNMENT 0xd2 +#define TM_CAUSE_EMULATE 0xd0 #if defined(CONFIG_PPC_BOOK3S_64) #define MSR_64BIT MSR_SF -- cgit v1.2.3 From b75c100ef24894bd2c8b52e123bcc5f191c5d9fd Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Sun, 26 May 2013 18:30:56 +0000 Subject: powerpc/tm: Move TM abort cause codes to uapi These cause codes are usable by userspace, so let's export to uapi. Signed-off-by: Michael Neuling Cc: # v3.9 Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/reg.h | 14 -------------- arch/powerpc/include/asm/tm.h | 2 ++ 2 files changed, 2 insertions(+), 14 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d0528e0d6db8..4a9e408644fe 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -111,20 +111,6 @@ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) -/* Reason codes describing kernel causes for transaction aborts. By - convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if - the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. 
-*/
-#define TM_CAUSE_PERSISTENT	0x01
-#define TM_CAUSE_RESCHED	0xde
-#define TM_CAUSE_TLBI		0xdc
-#define TM_CAUSE_FAC_UNAV	0xda
-#define TM_CAUSE_SYSCALL	0xd8	/* future use */
-#define TM_CAUSE_MISC		0xd6	/* future use */
-#define TM_CAUSE_SIGNAL		0xd4
-#define TM_CAUSE_ALIGNMENT	0xd2
-#define TM_CAUSE_EMULATE	0xd0
-
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT	MSR_SF

diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 4b4449abf3f8..9dfbc34bdbf5 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -5,6 +5,8 @@
  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
  */

+#include <uapi/asm/tm.h>
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
-- cgit v1.2.3

From 2b3f8e87cf99a33fb6faf5026d7147748bbd77b6 Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Sun, 26 May 2013 18:09:41 +0000
Subject: powerpc/tm: Fix userspace stack corruption on signal delivery for active transactions

When in an active transaction that takes a signal, we need to be careful with
the stack.  It's possible that the stack has moved back up after the tbegin.
The obvious case here is when the tbegin is called inside a function that
returns before a tend.  In this case, the stack is part of the checkpointed
transactional memory state.  If we write over this non-transactionally or in
suspend, we are in trouble because if we get a tm abort, the program counter
and stack pointer will be back at the tbegin but our in-memory stack won't be
valid anymore.

To avoid this, when taking a signal in an active transaction, we need to use
the stack pointer from the checkpointed state, rather than the speculated
state.  This ensures that the signal context (written tm suspended) will be
written below the stack required for the rollback.  The transaction is aborted
because of the treclaim, so any memory written between the tbegin and the
signal will be rolled back anyway.

For signals taken in non-TM or suspended mode, we use the
normal/non-checkpointed stack pointer.
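A simplified model of that stack-pointer selection is sketched below. The flag values and variable names are stand-ins; the real helper added by this patch is get_tm_stackpointer() (declared in the signal.h diff below), and it also has to handle the actual TM reclaim and the case where TM support is configured out.

/* Model: pick the checkpointed SP when the transaction is active. */
#include <stdio.h>

#define MSR_TS_T	0x2UL		/* stand-in: transaction active */
#define MSR_TS_S	0x1UL		/* stand-in: transaction suspended */

struct regs { unsigned long gpr1; unsigned long msr; };

static unsigned long ckpt_gpr1;		/* GPR1 captured in the checkpointed state */

static unsigned long signal_stack_pointer(const struct regs *live)
{
	if (live->msr & MSR_TS_T)
		return ckpt_gpr1;	/* SP as of tbegin: safe for the rollback */
	return live->gpr1;		/* non-TM or suspended: use the live SP */
}

int main(void)
{
	/* the frame containing the tbegin was popped, so the live SP is higher */
	struct regs live = { .gpr1 = 0x7fffe000UL, .msr = MSR_TS_T };

	ckpt_gpr1 = 0x7fffd000UL;	/* SP at tbegin time */
	printf("write the signal frame below %#lx\n", signal_stack_pointer(&live));
	return 0;
}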
Tested with 64 and 32 bit signals Signed-off-by: Michael Neuling Cc: # v3.9 Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/processor.h | 13 ++++--------- arch/powerpc/include/asm/signal.h | 3 +++ 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 594db6bc093c..14a658363698 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -409,21 +409,16 @@ static inline void prefetchw(const void *x) #endif #ifdef CONFIG_PPC64 -static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) +static inline unsigned long get_clean_sp(unsigned long sp, int is_32) { - unsigned long sp; - if (is_32) - sp = regs->gpr[1] & 0x0ffffffffUL; - else - sp = regs->gpr[1]; - + return sp & 0x0ffffffffUL; return sp; } #else -static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) +static inline unsigned long get_clean_sp(unsigned long sp, int is_32) { - return regs->gpr[1]; + return sp; } #endif diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index fbe66c463891..9322c28aebd2 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -3,5 +3,8 @@ #define __ARCH_HAS_SA_RESTORER #include +#include + +extern unsigned long get_tm_stackpointer(struct pt_regs *regs); #endif /* _ASM_POWERPC_SIGNAL_H */ -- cgit v1.2.3 From a515348fc69fd1d9e8ebd34a16f1026d7fe32048 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 29 May 2013 19:34:27 +0000 Subject: powerpc/pseries: Kill all prefetch streams on context switch On context switch, we should have no prefetch streams leak from one userspace process to another. This frees up prefetch resources for the next process. Based on patch from Milton Miller. Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ppc_asm.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index cea8496091ff..2f1b6c5f8174 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) #define PPC440EP_ERR42 #endif +/* The following stops all load and store data streams associated with stream + * ID (ie. streams created explicitly). The embedded and server mnemonics for + * dcbt are different so we use machine "power4" here explicitly. + */ +#define DCBT_STOP_ALL_STREAM_IDS(scratch) \ +.machine push ; \ +.machine "power4" ; \ + lis scratch,0x60000000@h; \ + dcbt r0,scratch,0b01010; \ +.machine pop + /* * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them * keep the address intact to be compatible with code shared with -- cgit v1.2.3 From 8e44ddc3f34d22c55f2977ac8b160609935d37ca Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 23 May 2013 15:42:21 +0000 Subject: powerpc/kvm/book3s: Add support for H_IPOLL and H_XIRR_X in XICS emulation This adds the remaining two hypercalls defined by PAPR for manipulating the XICS interrupt controller, H_IPOLL and H_XIRR_X. H_IPOLL returns information about the priority and pending interrupts for a virtual cpu, without changing any state. 
H_XIRR_X is like H_XIRR in that it reads and acknowledges the highest-priority
pending interrupt, but it also returns the timestamp (timebase register value)
from when the interrupt was first received by the hypervisor.  Currently we
just return the current time, since we don't do any software queueing of
virtual interrupts inside the XICS emulation code.

These hcalls are not currently used by Linux guests, but may be in future.

Signed-off-by: Paul Mackerras
Acked-by: Scott Wood
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hvcall.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index cf4df8e2139a..0c7f2bfcf134 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -264,6 +264,7 @@
 #define H_GET_MPP		0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY	0x2EC
 #define H_BEST_ENERGY		0x2F4
+#define H_XIRR_X		0x2FC
 #define H_RANDOM		0x300
 #define H_COP			0x304
 #define H_GET_MPP_X		0x314
-- cgit v1.2.3

From 82a9f16adc12f51c3f8ea59a7c3c120241aff836 Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Thu, 16 May 2013 20:27:31 +0000
Subject: powerpc/hw_breakpoints: Add DABRX cpu feature to fix 32-bit regression

When introducing support for DABRX in 4474ef0, we broke older 32-bit CPUs
that don't have that register.  Some CPUs have a DABR but not DABRX.  The
configurations are:

- No 32-bit CPUs have DABRX, but some have DABR.
- POWER4+ and below have the DABR but no DABRX.
- 970 and POWER5 and above have DABR and DABRX.
- POWER8 has DAWR, hence no DABRX.

This introduces CPU_FTR_DABRX and sets it on the appropriate CPUs.  The new
bit goes in the upper half of the 64-bit CPU feature word, since only 64-bit
CPUs have DABRX.

Processors that don't have the DABRX will still work, as they will fall back
to software filtering these breakpoints via perf_exclude_event().
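The regression this feature bit exists to fix is in the code that actually writes the register, so purely as an illustration, here is the kind of guard the bit enables, modelled in plain C. The feature test and the SPR write are simulated, and the bit value is only illustrative, not an authoritative definition.

/* Model: only touch DABRX when the CPU reports having one. */
#include <stdio.h>
#include <stdbool.h>

#define CPU_FTR_DABRX	(1ull << 59)		/* illustrative bit only */

static unsigned long long cur_cpu_features;	/* stands in for the CPU feature word */

static bool cpu_has_feature(unsigned long long f)
{
	return (cur_cpu_features & f) != 0;
}

static void set_dabr_model(unsigned long dabr, unsigned long dabrx)
{
	printf("DABR  <- %#lx\n", dabr);	/* mtspr(SPRN_DABR, ...) in the kernel */

	if (cpu_has_feature(CPU_FTR_DABRX))
		printf("DABRX <- %#lx\n", dabrx); /* mtspr(SPRN_DABRX, ...) */
	else
		printf("no DABRX on this CPU, skipping the write\n");
}

int main(void)
{
	cur_cpu_features = 0;			/* e.g. an older 32-bit CPU */
	set_dabr_model(0x10000000UL, 0x3UL);

	cur_cpu_features = CPU_FTR_DABRX;	/* e.g. 970/POWER5 and later */
	set_dabr_model(0x10000000UL, 0x3UL);
	return 0;
}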
Signed-off-by: Michael Neuling Reported-by: "Gorelik, Jacob (335F)" cc: stable@vger.kernel.org (v3.9 only) Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/cputable.h | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 26807e5aff51..6f3887d884d2 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -176,6 +176,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) +#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) #ifndef __ASSEMBLY__ @@ -394,19 +395,20 @@ extern const char *powerpc_base_platform; CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ - CPU_FTR_HVMODE) + CPU_FTR_HVMODE | CPU_FTR_DABRX) #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX) #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \ + CPU_FTR_DABRX) #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -415,7 +417,7 @@ extern const char *powerpc_base_platform; CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ - CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR) + CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX) #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -430,14 +432,15 @@ extern const char *powerpc_base_platform; CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ - CPU_FTR_UNALIGNED_LD_STD) + CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_PURR | CPU_FTR_REAL_LE) + CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ - CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) + CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_ICSWX | CPU_FTR_DABRX ) #ifdef __powerpc64__ #ifdef CONFIG_PPC_BOOK3E -- cgit v1.2.3 From 4edd1ae91baa63e120b414647c79a7aa5ca50ae7 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Thu, 6 Jun 2013 19:16:29 -0500 Subject: kvm/ppc/booke64: Fix AltiVec interrupt numbers and build breakage Interrupt numbers defined for Book3E follows IVORs definition. 
Align BOOKE_INTERRUPT_ALTIVEC_UNAVAIL and BOOKE_INTERRUPT_ALTIVEC_ASSIST to this rule which also fixes the build breakage. IVORs 32 and 33 are shared so reflect this in the interrupts naming. This fixes a build break for 64-bit booke KVM. Signed-off-by: Mihai Caraman Signed-off-by: Scott Wood Signed-off-by: Gleb Natapov --- arch/powerpc/include/asm/kvm_asm.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index b9dd382cb349..851bac7afa4b 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -54,8 +54,16 @@ #define BOOKE_INTERRUPT_DEBUG 15 /* E500 */ -#define BOOKE_INTERRUPT_SPE_UNAVAIL 32 -#define BOOKE_INTERRUPT_SPE_FP_DATA 33 +#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32 +#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33 +/* + * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines + */ +#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL +#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST +#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL +#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \ + BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 #define BOOKE_INTERRUPT_DOORBELL 36 @@ -67,10 +75,6 @@ #define BOOKE_INTERRUPT_HV_SYSCALL 40 #define BOOKE_INTERRUPT_HV_PRIV 41 -/* altivec */ -#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42 -#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43 - /* book3s */ #define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 -- cgit v1.2.3 From 0e37739b1c96d65e6433998454985de994383019 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 13 Jun 2013 21:04:56 +1000 Subject: powerpc: Fix stack overflow crash in resume_kernel when ftracing It's possible for us to crash when running with ftrace enabled, eg: Bad kernel stack pointer bffffd12 at c00000000000a454 cpu 0x3: Vector: 300 (Data Access) at [c00000000ffe3d40] pc: c00000000000a454: resume_kernel+0x34/0x60 lr: c00000000000335c: performance_monitor_common+0x15c/0x180 sp: bffffd12 msr: 8000000000001032 dar: bffffd12 dsisr: 42000000 If we look at current's stack (paca->__current->stack) we see it is equal to c0000002ecab0000. Our stack is 16K, and comparing to paca->kstack (c0000002ecab3e30) we can see that we have overflowed our kernel stack. This leads to us writing over our struct thread_info, and in this case we have corrupted thread_info->flags and set _TIF_EMULATE_STACK_STORE. 
Dumping the stack we see: 3:mon> t c0000002ecab0000 [c0000002ecab0000] c00000000002131c .performance_monitor_exception+0x5c/0x70 [c0000002ecab0080] c00000000000335c performance_monitor_common+0x15c/0x180 --- Exception: f01 (Performance Monitor) at c0000000000fb2ec .trace_hardirqs_off+0x1c/0x30 [c0000002ecab0370] c00000000016fdb0 .trace_graph_entry+0xb0/0x280 (unreliable) [c0000002ecab0410] c00000000003d038 .prepare_ftrace_return+0x98/0x130 [c0000002ecab04b0] c00000000000a920 .ftrace_graph_caller+0x14/0x28 [c0000002ecab0520] c0000000000d6b58 .idle_cpu+0x18/0x90 [c0000002ecab05a0] c00000000000a934 .return_to_handler+0x0/0x34 [c0000002ecab0620] c00000000001e660 .timer_interrupt+0x160/0x300 [c0000002ecab06d0] c0000000000025dc decrementer_common+0x15c/0x180 --- Exception: 901 (Decrementer) at c0000000000104d4 .arch_local_irq_restore+0x74/0xa0 [c0000002ecab09c0] c0000000000fe044 .trace_hardirqs_on+0x14/0x30 (unreliable) [c0000002ecab0fb0] c00000000016fe3c .trace_graph_entry+0x13c/0x280 [c0000002ecab1050] c00000000003d038 .prepare_ftrace_return+0x98/0x130 [c0000002ecab10f0] c00000000000a920 .ftrace_graph_caller+0x14/0x28 [c0000002ecab1160] c0000000000161f0 .__ppc64_runlatch_on+0x10/0x40 [c0000002ecab11d0] c00000000000a934 .return_to_handler+0x0/0x34 --- Exception: 901 (Decrementer) at c0000000000104d4 .arch_local_irq_restore+0x74/0xa0 ... and so on __ppc64_runlatch_on() is called from RUNLATCH_ON in the exception entry path. At that point the irq state is not consistent, ie. interrupts are hard disabled (by the exception entry), but the paca soft-enabled flag may be out of sync. This leads to the local_irq_restore() in trace_graph_entry() actually enabling interrupts, which we do not want. Because we have not yet reprogrammed the decrementer we immediately take another decrementer exception, and recurse. The fix is twofold. Firstly make sure we call DISABLE_INTS before calling RUNLATCH_ON. The badly named DISABLE_INTS actually reconciles the irq state in the paca with the hardware, making it safe again to call local_irq_save/restore(). Although that should be sufficient to fix the bug, we also mark the runlatch routines as notrace. They are called very early in the exception entry and we are asking for trouble tracing them. They are also fairly uninteresting and tracing them just adds unnecessary overhead. [ This regression was introduced by fe1952fc0afb9a2e4c79f103c08aef5d13db1873 "powerpc: Rework runlatch code" by myself --BenH ] CC: [v3.4+] Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/exception-64s.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 8e5fae8beaf6..46793b58a761 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -513,7 +513,7 @@ label##_common: \ */ #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ - FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) + FINISH_NAP;DISABLE_INTS;RUNLATCH_ON) /* * When the idle code in power4_idle puts the CPU into NAP mode, -- cgit v1.2.3
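To see why the old ordering could immediately re-enable interrupts, here is a stand-alone C model of the sequence. The flags and helpers are simulated and only loosely follow the kernel's names; the real fix is the macro reordering in the diff above plus marking the runlatch helpers notrace.

/* Model: traced runlatch code runs before the soft-enable state is reconciled. */
#include <stdio.h>
#include <stdbool.h>

static bool hard_ee;		/* MSR[EE]: cleared by exception entry */
static bool soft_enabled;	/* stands in for paca->soft_enabled */

static void local_irq_save(bool *flags)
{
	*flags = soft_enabled;
	soft_enabled = false;
}

static void local_irq_restore(bool flags)
{
	soft_enabled = flags;
	if (flags)
		hard_ee = true;		/* re-enables the not-yet-reprogrammed decrementer */
}

static void traced_runlatch_on(void)	/* models __ppc64_runlatch_on under ftrace */
{
	bool flags;

	local_irq_save(&flags);
	/* ... tracing work ... */
	local_irq_restore(flags);
}

int main(void)
{
	/* exception entry: hardware EE is off, soft state still says "enabled" */
	hard_ee = false;
	soft_enabled = true;

	traced_runlatch_on();		/* old order: RUNLATCH_ON before DISABLE_INTS */
	printf("old order: EE %s too early\n", hard_ee ? "re-enabled" : "still off");

	hard_ee = false;
	soft_enabled = false;		/* DISABLE_INTS reconciles the state first */
	traced_runlatch_on();		/* new order: safe */
	printf("new order: EE %s\n", hard_ee ? "re-enabled" : "still off");
	return 0;
}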