author		Linus Torvalds <torvalds@linux-foundation.org>	2010-04-19 07:26:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-04-19 07:26:21 -0700
commit		eb3e5cce2b39a266a1a167fa4290939db20ef5d6 (patch)
tree		c4a0e4f4528de93d0c35c5a000e3cb7546dfce32
parent		13bd8e4673d527a9e48f41956b11d391e7c2cfe0 (diff)
parent		b1cdbb5f8342d99b732c5535ee7d2de8e7b2cc2e (diff)
Merge master.kernel.org:/home/rmk/linux-2.6-arm
* master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: 5974/1: arm/mach-at91 Makefile: remove two blanks.
  ARM: 6052/1: kdump: make kexec work in interrupt context
  ARM: 6051/1: VFP: preserve the HW context when calling signal handlers
  ARM: 6050/1: VFP: fix the SMP versions of vfp_{sync,flush}_hwstate
  ARM: 6007/1: fix highmem with VIPT cache and DMA
  ARM: 5975/1: AT91 slow-clock suspend: don't wait when turning PLLs off
-rw-r--r--	arch/arm/include/asm/highmem.h		15
-rw-r--r--	arch/arm/include/asm/kmap_types.h	 1
-rw-r--r--	arch/arm/include/asm/ucontext.h		23
-rw-r--r--	arch/arm/include/asm/user.h		12
-rw-r--r--	arch/arm/kernel/signal.c		93
-rw-r--r--	arch/arm/mach-at91/Makefile		 4
-rw-r--r--	arch/arm/mach-at91/pm_slowclock.S	 4
-rw-r--r--	arch/arm/mm/copypage-v6.c		 9
-rw-r--r--	arch/arm/mm/dma-mapping.c		 5
-rw-r--r--	arch/arm/mm/flush.c			25
-rw-r--r--	arch/arm/mm/highmem.c			87
-rw-r--r--	arch/arm/mm/mmu.c			10
-rw-r--r--	arch/arm/vfp/vfpmodule.c		31
13 files changed, 251 insertions(+), 68 deletions(-)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
#define kmap_prot PAGE_KERNEL
-#define flush_cache_kmaps() flush_cache_all()
+#define flush_cache_kmaps() \
+ do { \
+ if (cache_is_vivt()) \
+ flush_cache_all(); \
+ } while (0)
extern pte_t *pkmap_page_table;
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
#endif
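
For illustration: a minimal sketch, assuming only the declarations above, of how a caller combines kmap_high_get() with the new VIPT helpers. flush_highmem_page() is a hypothetical name; the real callers are in arch/arm/mm/flush.c and arch/arm/mm/dma-mapping.c further down in this diff.

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

/* Hypothetical caller, for illustration only. */
static void flush_highmem_page(struct page *page)
{
	void *addr = kmap_high_get(page);	/* pinned kmap already set up? */

	if (addr) {
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high(page);
	} else if (cache_is_vipt()) {
		/* no mapping exists: create a temporary per-CPU one */
		pte_t saved_pte;

		addr = kmap_high_l1_vipt(page, &saved_pte);
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high_l1_vipt(page, saved_pte);
	}
	/* on VIVT, an unmapped highmem page needs no cache maintenance */
}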
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
+ KM_L1_CACHE,
KM_L2_CACHE,
KM_TYPE_NR
};
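
The new KM_L1_CACHE slot reserves one fixmap entry per CPU for the VIPT flushing code. As a sketch of how a km_type value maps to an address (this is the same computation kmap_high_l1_vipt() performs in arch/arm/mm/highmem.c further down; the function name here is hypothetical):

/* Sketch only; mirrors the computation in arch/arm/mm/highmem.c. */
static void *l1_cache_kmap_addr(void)
{
	unsigned int idx = KM_L1_CACHE + KM_TYPE_NR * smp_processor_id();

	return (void *)__fix_to_virt(FIX_KMAP_BEGIN + idx);
}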
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
#endif /* CONFIG_IWMMXT */
#ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment. */
#define VFP_MAGIC 0x56465001
-#define VFP_STORAGE_SIZE 152
-#else
-#define VFP_MAGIC 0x56465002
-#define VFP_STORAGE_SIZE 144
-#endif
struct vfp_sigframe
{
unsigned long magic;
unsigned long size;
- union vfp_state storage;
-};
+ struct user_vfp ufp;
+ struct user_vfp_exc ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ * 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ * 4 bytes of padding.
+ */
+#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
+
#endif /* CONFIG_VFP */
/*
@@ -91,7 +90,7 @@ struct aux_sigframe {
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
struct vfp_sigframe vfp;
#endif
/* Something that isn't a valid magic number for any coprocessor. */
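
The size arithmetic above can be checked at compile time. On ARM EABI, sizeof(struct user_vfp) is 264 because the 8-byte alignment of fpregs pads out the trailing fpscr; 8 + 264 + 12 = 284, rounded up to 288 by the frame's 8-byte alignment. A hedged sketch (this check function is illustrative, not part of this commit):

/* Illustrative only; BUILD_BUG_ON() is from <linux/kernel.h>. */
static inline void vfp_sigframe_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct user_vfp) != 264);
	BUILD_BUG_ON(VFP_STORAGE_SIZE != 288);
	BUILD_BUG_ON((VFP_STORAGE_SIZE & 7) != 0);	/* 8-byte aligned */
}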
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
*/
struct user_vfp {
unsigned long long fpregs[32];
unsigned long fpscr;
};
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+ unsigned long fpexc;
+ unsigned long fpinst;
+ unsigned long fpinst2;
+};
+
#endif /* _ARM_USER_H */
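
Because aux_sigframe is placed in the ucontext's uc_regspace, a user-space signal handler can walk the coprocessor frames by their magic/size headers to find this VFP state. A hedged sketch, assuming a glibc-style ucontext_t with uc_regspace and a locally mirrored struct; none of the names below are defined by this commit.

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

#define VFP_MAGIC 0x56465001UL		/* from asm/ucontext.h above */

struct vfp_frame_regs {			/* locally mirrors struct user_vfp */
	unsigned long long fpregs[32];
	unsigned long fpscr;
};

static void handler(int sig, siginfo_t *si, void *ctx)
{
	ucontext_t *uc = ctx;
	unsigned long *p = uc->uc_regspace;
	unsigned long *end = p + 128;

	/* coprocessor frames are (magic, size) pairs, terminated by 0 */
	while (p + 2 <= end && p[0] && p[1]) {
		if (p[0] == VFP_MAGIC) {
			struct vfp_frame_regs *r = (void *)(p + 2);
			/* printf() is not async-signal-safe; demo only */
			printf("fpscr at signal time: %#lx\n", r->fpscr);
			break;
		}
		p += p[1] / sizeof(*p);
	}
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}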
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
+#include <asm/vfp.h>
#include "ptrace.h"
#include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
#endif
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *h = &thread->vfpstate.hard;
+ const unsigned long magic = VFP_MAGIC;
+ const unsigned long size = VFP_STORAGE_SIZE;
+ int err = 0;
+
+ vfp_sync_hwstate(thread);
+ __put_user_error(magic, &frame->magic, err);
+ __put_user_error(size, &frame->size, err);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+ sizeof(h->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+ /*
+ * Copy the exception registers.
+ */
+ __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+ __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+ __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *h = &thread->vfpstate.hard;
+ unsigned long magic;
+ unsigned long size;
+ unsigned long fpexc;
+ int err = 0;
+
+ __get_user_error(magic, &frame->magic, err);
+ __get_user_error(size, &frame->size, err);
+
+ if (err)
+ return -EFAULT;
+ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+ sizeof(h->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+ __get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+ /* Ensure FPINST2 is invalid and the exception flag is cleared. */
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ h->fpexc = fpexc;
+
+ __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+ __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+ if (!err)
+ vfp_flush_hwstate(thread);
+
+ return err ? -EFAULT : 0;
+}
+
+#endif
+
/*
* Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
-// if (err == 0)
-// err |= vfp_restore_state(&sf->aux.vfp);
+ if (err == 0)
+ err |= restore_vfp_context(&aux->vfp);
#endif
return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
-// if (err == 0)
-// err |= vfp_save_state(&sf->aux.vfp);
+ if (err == 0)
+ err |= preserve_vfp_context(&aux->vfp);
#endif
__put_user_error(0, &aux->end_magic, err);
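
A hedged user-space sketch of the failure mode ARM 6051/1 addresses, assuming a build that emits VFP instructions: before this change nothing saved the VFP registers across signal delivery, so a handler's floating-point arithmetic could corrupt the interrupted computation.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t fired;

static void handler(int sig)
{
	volatile double x = 3.0;

	x /= 7.0;			/* force VFP use inside the handler */
	fired = 1;
}

int main(void)
{
	double acc = 1.0;
	long i;

	signal(SIGALRM, handler);
	alarm(1);			/* interrupt the loop mid-computation */
	for (i = 0; i < (1L << 27); i++)
		acc *= 1.0 + 1e-12;	/* long-running VFP computation */
	printf("fired=%d acc=%.17g\n", fired, acc);
	return 0;			/* a wrong acc means lost VFP context */
}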
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd570dcc3..d4004557532a 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d
obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
- obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 9fcbd6ca0090..9c5b48e68a71 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
orr r3, r3, #(1 << 29) /* bit 29 always set */
str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
- wait_pllalock
-
/* Save PLLB setting and disable it */
ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
str r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
mov r3, #AT91_PMC_PLLCOUNT
str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
- wait_pllblock
-
/* Turn off the main oscillator */
ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
bic r3, r3, #AT91_PMC_MOSCEN
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
- /*
- * kmap_atomic() doesn't set the page virtual address, and
- * kunmap_atomic() takes care of cache flushing already.
- */
- if (page_address(to) != NULL)
-#endif
- __cpuc_flush_dcache_area(kto, PAGE_SIZE);
+ __cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
vaddr += offset;
op(vaddr, len, dir);
kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ pte_t saved_pte;
+ vaddr = kmap_high_l1_vipt(page, &saved_pte);
+ op(vaddr + offset, len, dir);
+ kunmap_high_l1_vipt(page, saved_pte);
}
} else {
vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
+#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
- void *addr = page_address(page);
-
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernel's mapping.
*/
-#ifdef CONFIG_HIGHMEM
- /*
- * kmap_atomic() doesn't set the page virtual address, and
- * kunmap_atomic() takes care of cache flushing already.
- */
- if (addr)
-#endif
- __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ if (!PageHighMem(page)) {
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+ } else {
+ void *addr = kmap_high_get(page);
+ if (addr) {
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ pte_t saved_pte;
+ addr = kmap_high_l1_vipt(page, &saved_pte);
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high_l1_vipt(page, saved_pte);
+ }
+ }
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
if (kvaddr >= (void *)FIXADDR_START) {
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+ unsigned int idx, cpu = smp_processor_id();
+ int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+ unsigned long vaddr, flags;
+ pte_t pte, *ptep;
+
+ idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ ptep = TOP_PTE(vaddr);
+ pte = mk_pte(page, kmap_prot);
+
+ if (!in_interrupt())
+ preempt_disable();
+
+ raw_local_irq_save(flags);
+ (*depth)++;
+ if (pte_val(*ptep) == pte_val(pte)) {
+ *saved_pte = pte;
+ } else {
+ *saved_pte = *ptep;
+ set_pte_ext(ptep, pte, 0);
+ local_flush_tlb_kernel_page(vaddr);
+ }
+ raw_local_irq_restore(flags);
+
+ return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+ unsigned int idx, cpu = smp_processor_id();
+ int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+ unsigned long vaddr, flags;
+ pte_t pte, *ptep;
+
+ idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ ptep = TOP_PTE(vaddr);
+ pte = mk_pte(page, kmap_prot);
+
+ BUG_ON(pte_val(*ptep) != pte_val(pte));
+ BUG_ON(*depth <= 0);
+
+ raw_local_irq_save(flags);
+ (*depth)--;
+ if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+ set_pte_ext(ptep, saved_pte, 0);
+ local_flush_tlb_kernel_page(vaddr);
+ }
+ raw_local_irq_restore(flags);
+
+ if (!in_interrupt())
+ preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
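
To make the depth accounting concrete, a hedged sketch of the nesting the comment above describes. In reality the inner pair runs in interrupt context; the same sequence inline shows how the saved pte restores the outer mapping. The function name is hypothetical, and note that both calls return the same fixmap address, since KM_L1_CACHE gives one slot per CPU.

#include <linux/highmem.h>
#include <asm/cacheflush.h>

/* Hypothetical, for illustration only. */
static void nested_l1_vipt_example(struct page *a, struct page *b)
{
	pte_t pte_a, pte_b;
	void *va, *vb;

	va = kmap_high_l1_vipt(a, &pte_a);	/* depth 0 -> 1 */

	/* as if an interrupt arrived here and mapped another page: */
	vb = kmap_high_l1_vipt(b, &pte_b);	/* depth 1 -> 2, saves a's pte */
	__cpuc_flush_dcache_area(vb, PAGE_SIZE);
	kunmap_high_l1_vipt(b, pte_b);		/* depth 2 -> 1, restores a's pte */

	__cpuc_flush_dcache_area(va, PAGE_SIZE);/* a's mapping is valid again */
	kunmap_high_l1_vipt(a, pte_a);		/* depth 1 -> 0, entry left in
						   place for cheap reuse */
}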
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4223d086aa17..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
pgd_t *pgd;
int i;
- if (current->mm && current->mm->pgd)
- pgd = current->mm->pgd;
- else
- pgd = init_mm.pgd;
+ /*
+ * We need access to user-mode page tables here. For kernel threads
+ * we don't have any user-mode mappings so we use the context that we
+ * "borrowed".
+ */
+ pgd = current->active_mm->pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a420cb949328..315a540c7ce5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_hwstate(struct thread_info *thread)
-{
-}
-
-void vfp_flush_hwstate(struct thread_info *thread)
-{
- /*
- * On SMP systems, the VFP state is automatically saved at every
- * context switch. We mark the thread VFP state as belonging to a
- * non-existent CPU so that the saved one will be reloaded when
- * needed.
- */
- thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
last_VFP_context[cpu] = NULL;
}
+#ifdef CONFIG_SMP
+ /*
+ * For SMP we still have to take care of the case where the thread
+ * migrates to another CPU and then back to the original CPU on which
+ * the last VFP user is still the same thread. Mark the thread VFP
+ * state as belonging to a non-existent CPU so that the saved one will
+ * be reloaded in the above case.
+ */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
put_cpu();
}
-#endif
#include <linux/smp.h>
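
With the SMP stubs removed, vfp_sync_hwstate() and vfp_flush_hwstate() now bracket any software-level access to a thread's VFP state on both UP and SMP, as the new signal code above does. A minimal sketch of that bracket pattern; set_thread_fpscr() is a hypothetical helper, not part of this commit.

#include <asm/thread_info.h>

/* Hypothetical helper, for illustration only. */
static void set_thread_fpscr(struct thread_info *thread, unsigned long fpscr)
{
	vfp_sync_hwstate(thread);		/* HW state -> thread->vfpstate */
	thread->vfpstate.hard.fpscr = fpscr;	/* edit the software copy */
	vfp_flush_hwstate(thread);		/* make HW reload it on next use */
}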