author     Mandar padmawar <mpadmawar@nvidia.com>    2014-05-09 06:59:53 -0700
committer  Mandar padmawar <mpadmawar@nvidia.com>    2014-05-09 06:59:53 -0700
commit     c3fbf3b449b9e9c7e3b2aa5acf3e4c205787155d (patch)
tree       4279b24b75bfbb72c2b7120d20d093477ac41222
parent     315ef73b778ca43ce461a6ff75a3c65ac9d38c42 (diff)
parent     2667b986de4255c1c274d7f81d05364857dc37f8 (diff)
Merge commit 'refs/changes/86/405286/2' of ssh://git-master:12001/linux-3.10 into promotion_build
-rw-r--r--  Documentation/arm64/booting.txt | 16
-rw-r--r--  Documentation/arm64/memory.txt | 27
-rw-r--r--  Documentation/arm64/tagged-pointers.txt | 14
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/include/asm/Kbuild | 2
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/include/asm/Kbuild | 2
-rw-r--r--  arch/arc/kernel/devtree.c | 6
-rw-r--r--  arch/arc/mm/fault.c | 1
-rw-r--r--  arch/arc/mm/init.c | 8
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/include/asm/Kbuild | 2
-rw-r--r--  arch/arm/include/asm/arch_timer.h | 23
-rw-r--r--  arch/arm/include/asm/atomic.h | 24
-rw-r--r--  arch/arm/include/asm/barrier.h | 15
-rw-r--r--  arch/arm/include/asm/elf.h | 2
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 14
-rw-r--r--  arch/arm/kernel/kprobes.c | 8
-rw-r--r--  arch/arm/kernel/module.c | 2
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 2
-rw-r--r--  arch/arm/kvm/arm.c | 10
-rw-r--r--  arch/arm/mm/init.c | 14
-rw-r--r--  arch/arm64/Kconfig | 20
-rw-r--r--  arch/arm64/Makefile | 6
-rw-r--r--  arch/arm64/boot/dts/foundation-v8.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi | 6
-rw-r--r--  arch/arm64/configs/defconfig | 23
-rw-r--r--  arch/arm64/include/asm/Kbuild | 2
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 65
-rw-r--r--  arch/arm64/include/asm/assembler.h | 31
-rw-r--r--  arch/arm64/include/asm/atomic.h | 67
-rw-r--r--  arch/arm64/include/asm/barrier.h | 52
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 47
-rw-r--r--  arch/arm64/include/asm/compat.h | 14
-rw-r--r--  arch/arm64/include/asm/cputype.h | 18
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 28
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 3
-rw-r--r--  arch/arm64/include/asm/elf.h | 21
-rw-r--r--  arch/arm64/include/asm/futex.h | 11
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 13
-rw-r--r--  arch/arm64/include/asm/insn.h | 108
-rw-r--r--  arch/arm64/include/asm/io.h | 3
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 52
-rw-r--r--  arch/arm64/include/asm/memory.h | 14
-rw-r--r--  arch/arm64/include/asm/neon.h | 14
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 9
-rw-r--r--  arch/arm64/include/asm/pgtable-2level-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 82
-rw-r--r--  arch/arm64/include/asm/processor.h | 5
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 18
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 5
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 25
-rw-r--r--  arch/arm64/include/asm/virt.h | 3
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h | 94
-rw-r--r--  arch/arm64/include/uapi/asm/byteorder.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 7
-rw-r--r--  arch/arm64/kernel/Makefile | 3
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 6
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 121
-rw-r--r--  arch/arm64/kernel/entry.S | 26
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 64
-rw-r--r--  arch/arm64/kernel/head.S | 79
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 62
-rw-r--r--  arch/arm64/kernel/insn.c | 304
-rw-r--r--  arch/arm64/kernel/jump_label.c | 58
-rw-r--r--  arch/arm64/kernel/kuser32.S | 55
-rw-r--r--  arch/arm64/kernel/module.c | 158
-rw-r--r--  arch/arm64/kernel/perf_event.c | 119
-rw-r--r--  arch/arm64/kernel/process.c | 7
-rw-r--r--  arch/arm64/kernel/ptrace.c | 40
-rw-r--r--  arch/arm64/kernel/setup.c | 89
-rw-r--r--  arch/arm64/kernel/signal32.c | 39
-rw-r--r--  arch/arm64/kernel/smp.c | 24
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 11
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 2
-rw-r--r--  arch/arm64/kernel/suspend.c | 23
-rw-r--r--  arch/arm64/kernel/sys32.S | 22
-rw-r--r--  arch/arm64/kernel/vdso.c | 5
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 41
-rw-r--r--  arch/arm64/lib/Makefile | 3
-rw-r--r--  arch/arm64/lib/bitops.S | 3
-rw-r--r--  arch/arm64/lib/strncpy_from_user.S | 50
-rw-r--r--  arch/arm64/lib/strnlen_user.S | 47
-rw-r--r--  arch/arm64/mm/cache.S | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 25
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 23
-rw-r--r--  arch/arm64/mm/ioremap.c | 19
-rw-r--r--  arch/arm64/mm/mmu.c | 29
-rw-r--r--  arch/arm64/mm/pgd.c | 11
-rw-r--r--  arch/arm64/mm/proc-macros.S | 3
-rw-r--r--  arch/arm64/mm/proc.S | 6
-rw-r--r--  arch/arm64/mm/tlb.S | 2
-rw-r--r--  arch/avr32/include/asm/Kbuild | 3
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/blackfin/include/asm/Kbuild | 2
-rw-r--r--  arch/c6x/include/asm/Kbuild | 3
-rw-r--r--  arch/c6x/kernel/devicetree.c | 37
-rw-r--r--  arch/cris/include/asm/Kbuild | 4
-rw-r--r--  arch/frv/include/asm/Kbuild | 2
-rw-r--r--  arch/h8300/include/asm/Kbuild | 5
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/hexagon/include/asm/Kbuild | 2
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/ia64/include/asm/Kbuild | 3
-rw-r--r--  arch/ia64/include/asm/barrier.h | 23
-rw-r--r--  arch/ia64/kernel/signal.c | 2
-rw-r--r--  arch/m32r/Kconfig | 1
-rw-r--r--  arch/m32r/include/asm/Kbuild | 2
-rw-r--r--  arch/m68k/include/asm/Kbuild | 2
-rw-r--r--  arch/metag/Kconfig | 1
-rw-r--r--  arch/metag/include/asm/Kbuild | 2
-rw-r--r--  arch/metag/include/asm/barrier.h | 15
-rw-r--r--  arch/metag/mm/init.c | 10
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 2
-rw-r--r--  arch/microblaze/kernel/prom.c | 20
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/include/asm/Kbuild | 4
-rw-r--r--  arch/mips/include/asm/barrier.h | 15
-rw-r--r--  arch/mips/kernel/prom.c | 11
-rw-r--r--  arch/mips/kernel/signal32.c | 2
-rw-r--r--  arch/mn10300/Kconfig | 1
-rw-r--r--  arch/mn10300/include/asm/Kbuild | 2
-rw-r--r--  arch/openrisc/include/asm/Kbuild | 3
-rw-r--r--  arch/openrisc/kernel/prom.c | 15
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/include/asm/Kbuild | 2
-rw-r--r--  arch/parisc/kernel/module.c | 2
-rw-r--r--  arch/parisc/kernel/signal32.c | 2
-rw-r--r--  arch/parisc/kernel/signal32.h | 2
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 3
-rw-r--r--  arch/powerpc/include/asm/barrier.h | 21
-rw-r--r--  arch/powerpc/kernel/prom.c | 36
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 3
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/Kbuild | 2
-rw-r--r--  arch/s390/include/asm/barrier.h | 15
-rw-r--r--  arch/s390/kernel/compat_signal.c | 2
-rw-r--r--  arch/s390/kernel/module.c | 2
-rw-r--r--  arch/score/include/asm/Kbuild | 3
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/include/asm/Kbuild | 2
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/include/asm/Kbuild | 2
-rw-r--r--  arch/sparc/include/asm/barrier_64.h | 15
-rw-r--r--  arch/sparc/kernel/module.c | 2
-rw-r--r--  arch/sparc/kernel/signal32.c | 2
-rw-r--r--  arch/tile/Kconfig | 1
-rw-r--r--  arch/tile/include/asm/Kbuild | 2
-rw-r--r--  arch/tile/kernel/compat_signal.c | 2
-rw-r--r--  arch/um/include/asm/Kbuild | 2
-rw-r--r--  arch/unicore32/include/asm/Kbuild | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 2
-rw-r--r--  arch/x86/include/asm/Kbuild | 1
-rw-r--r--  arch/x86/include/asm/barrier.h | 43
-rw-r--r--  arch/x86/kernel/devicetree.c | 10
-rw-r--r--  arch/x86/kernel/module.c | 2
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 2
-rw-r--r--  arch/xtensa/kernel/setup.c | 16
-rw-r--r--  block/blk-mq.c | 1500
-rw-r--r--  block/blk-softirq.c | 4
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 48
-rw-r--r--  drivers/of/fdt.c | 76
-rw-r--r--  fs/binfmt_elf.c | 2
-rw-r--r--  include/asm-generic/barrier.h | 15
-rw-r--r--  include/asm-generic/preempt.h | 54
-rw-r--r--  include/asm-generic/siginfo.h | 2
-rw-r--r--  include/clocksource/arm_arch_timer.h | 14
-rw-r--r--  include/linux/compat.h | 2
-rw-r--r--  include/linux/compiler.h | 9
-rw-r--r--  include/linux/irqdesc.h | 8
-rw-r--r--  include/linux/mm.h | 3
-rw-r--r--  include/linux/of_fdt.h | 13
-rw-r--r--  include/linux/percpu-defs.h | 5
-rw-r--r--  include/linux/preempt.h | 9
-rw-r--r--  include/linux/smp.h | 4
-rw-r--r--  include/uapi/linux/elf-em.h | 2
-rw-r--r--  kernel/Kconfig.hz | 2
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/smp.c | 2
188 files changed, 3868 insertions, 985 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 9c4d388daddc..5273c4d60e65 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -68,13 +68,23 @@ Image target is available instead.
Requirement: MANDATORY
-The decompressed kernel image contains a 32-byte header as follows:
+The decompressed kernel image contains a 64-byte header as follows:
- u32 magic = 0x14000008; /* branch to stext, little-endian */
- u32 res0 = 0; /* reserved */
+ u32 code0; /* Executable code */
+ u32 code1; /* Executable code */
u64 text_offset; /* Image load offset */
+ u64 res0 = 0; /* reserved */
u64 res1 = 0; /* reserved */
u64 res2 = 0; /* reserved */
+ u64 res3 = 0; /* reserved */
+ u64 res4 = 0; /* reserved */
+ u32 magic = 0x644d5241; /* Magic number, little endian, "ARM\x64" */
+ u32 res5 = 0; /* reserved */
+
+
+Header notes:
+
+- code0/code1 are responsible for branching to stext.
The image must be placed at the specified offset (currently 0x80000)
from the start of the system RAM and called there. The start of the
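
[Editorial sketch, not part of this patch: the 64-byte header described above can be modelled as a plain C struct, with field names taken from the documentation text.]

    #include <stdint.h>

    /* AArch64 Image header, 64 bytes, all fields little-endian */
    struct arm64_image_header {
            uint32_t code0;        /* executable code; branches to stext */
            uint32_t code1;        /* executable code */
            uint64_t text_offset;  /* image load offset from start of RAM */
            uint64_t res0;         /* reserved, must be zero */
            uint64_t res1;         /* reserved, must be zero */
            uint64_t res2;         /* reserved, must be zero */
            uint64_t res3;         /* reserved, must be zero */
            uint64_t res4;         /* reserved, must be zero */
            uint32_t magic;        /* 0x644d5241, "ARM\x64" */
            uint32_t res5;         /* reserved */
    };

    /* A boot loader could sanity-check the image before honouring text_offset:
     * if (hdr->magic == 0x644d5241) load_at(ram_base + hdr->text_offset);
     */
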
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 5f583af0a6e1..a776d7a7c3cc 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -21,7 +21,7 @@ The swapper_pgd_dir address is written to TTBR1 and never written to
TTBR0.
-AArch64 Linux memory layout:
+AArch64 Linux memory layout with 4KB pages:
Start End Size Use
-----------------------------------------------------------------------
@@ -46,6 +46,31 @@ ffffffbffc000000 ffffffbfffffffff 64MB modules
ffffffc000000000 ffffffffffffffff 256GB kernel logical memory map
+AArch64 Linux memory layout with 64KB pages:
+
+Start End Size Use
+-----------------------------------------------------------------------
+0000000000000000 000003ffffffffff 4TB user
+
+fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc
+
+fffffdfbffff0000 fffffdfbffffffff 64KB [guard page]
+
+fffffdfc00000000 fffffdfdffffffff 8GB vmemmap
+
+fffffdfe00000000 fffffdfffbbfffff ~8GB [guard, future vmmemap]
+
+fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk device
+
+fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O space
+
+fffffdfffbe10000 fffffdfffbffffff ~2MB [guard]
+
+fffffdfffc000000 fffffdffffffffff 64MB modules
+
+fffffe0000000000 ffffffffffffffff 2TB kernel logical memory map
+
+
Translation table lookup with 4KB pages:
+--------+--------+--------+--------+--------+--------+--------+--------+
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index d9995f1f51b3..264e9841563a 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -18,17 +18,17 @@ this byte for application use, with the following caveats:
parameters containing user virtual addresses *must* have
their top byte cleared before trapping to the kernel.
- (2) Non-zero tags are not preserved when delivering signals.
- This means that signal handlers in applications making use
- of tags cannot rely on the tag information for user virtual
- addresses being maintained for fields inside siginfo_t.
- One exception to this rule is for signals raised in response
- to watchpoint debug exceptions, where the tag information
+ (2) Tags are not guaranteed to be preserved when delivering
+ signals. This means that signal handlers in applications
+ making use of tags cannot rely on the tag information for
+ user virtual addresses being maintained for fields inside
+ siginfo_t. One exception to this rule is for signals raised
+ in response to debug exceptions, where the tag information
will be preserved.
(3) Special care should be taken when using tagged pointers,
since it is likely that C compilers will not hazard two
- virtual addresses differing only in the upper byte.
+ addresses differing only in the upper bits.
The architecture prevents the use of a tagged PC, so the upper byte will
be set to a sign-extension of bit 55 on exception return.
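
[Editorial sketch, not from this patch: a userspace example of the tagging caveats above; tag_ptr()/untag_ptr() are made-up helper names.]

    #include <stdint.h>

    #define TAG_SHIFT  56
    #define ADDR_MASK  ((1UL << TAG_SHIFT) - 1)

    /* Pack an 8-bit tag into the unused top byte of a user pointer. */
    static inline void *tag_ptr(void *p, uint8_t tag)
    {
            return (void *)(((uintptr_t)p & ADDR_MASK) |
                            ((uintptr_t)tag << TAG_SHIFT));
    }

    /* Clear the top byte before the address is passed to a syscall. */
    static inline void *untag_ptr(void *p)
    {
            return (void *)((uintptr_t)p & ADDR_MASK);
    }

Per caveat (2) above, a tag read back from siginfo_t fields is only guaranteed to survive for debug exceptions.
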
diff --git a/arch/Kconfig b/arch/Kconfig
index 00e3702ec79b..6344a7ff7c45 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -207,9 +207,6 @@ config HAVE_DMA_ATTRS
config HAVE_DMA_CONTIGUOUS
bool
-config USE_GENERIC_SMP_HELPERS
- bool
-
config GENERIC_SMP_IDLE_THREAD
bool
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 837a1f2d8b96..2ad4dc3bad4c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -522,7 +522,6 @@ config ARCH_MAY_HAVE_PC_FDC
config SMP
bool "Symmetric multi-processing support"
depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
- select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index a6e85f448c1c..a73a8e208a4a 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,3 +3,5 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5917099470ea..6c7b1b36d59a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -119,7 +119,6 @@ config ARC_PLAT_NEEDS_CPU_TO_DMA
config SMP
bool "Symmetric Multi-Processing (Incomplete)"
default n
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index d8dd660898b9..93e6ca919620 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -46,3 +46,5 @@ generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index bdee3a812052..2340af0e1d6f 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -18,12 +18,6 @@
#include <asm/clk.h>
#include <asm/mach_desc.h>
-/* called from unflatten_device_tree() to bootstrap devicetree itself */
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 7c10770782a1..028e11e69e68 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -59,6 +59,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
struct mm_struct *mm = tsk->mm;
siginfo_t info;
int fault, ret;
+ int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/*
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 4a177365b2c4..ebaa9f7db126 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -155,11 +155,3 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
free_reserved_area(start, end, 0, "initrd");
}
#endif
-
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- pr_err("%s(%lx, %lx)\n", __func__, start, end);
-}
-#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 61c6f0f6cd15..040eae1a4908 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1528,10 +1528,9 @@ config SMP
depends on CPU_V6K || CPU_V7
depends on GENERIC_CLOCKEVENTS
depends on HAVE_SMP
- depends on MMU
select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP && \
(!ARCH_TEGRA || ARCH_TEGRA_HAS_ARM_SCU)
- select USE_GENERIC_SMP_HELPERS
+ depends on MMU || ARM_MPU
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9c..4f69afdd5f7d 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -33,3 +33,5 @@ generic-y += timex.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += unaligned.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index accefe099182..c78c4cbd329e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -17,7 +17,8 @@ int arch_timer_arch_init(void);
* nicely work out which register we want, and chuck away the rest of
* the code. At least it does so with a recent GCC (4.6.3).
*/
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
break;
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
@@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
isb();
}
-static inline u32 arch_timer_reg_read(const int access, const int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val = 0;
@@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
break;
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
@@ -95,8 +93,13 @@ static inline void __cpuinit arch_counter_set_user_access(void)
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
- /* disable user access to everything */
- cntkctl &= ~((3 << 8) | (7 << 0));
+ /* Disable user access to both physical/virtual counters/timers */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..3b3ae49fa561 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -134,21 +134,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long tmp, tmp2;
-
- __asm__ __volatile__("@ atomic_clear_mask\n"
-"1: ldrex %0, [%3]\n"
-" bic %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
- : "r" (addr), "Ir" (mask)
- : "cc");
-}
-
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
@@ -197,15 +182,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- *addr &= ~mask;
- raw_local_irq_restore(flags);
-}
-
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c702d90..b00ef075bc2e 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -59,6 +59,21 @@
#define smp_wmb() dmb()
#endif
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
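
[Editorial sketch, not from this patch: the smp_store_release()/smp_load_acquire() pair added here is typically used to publish data behind a flag; data, ready, compute() and consume() below are placeholders.]

    /* Producer */
    data = compute();                /* ordinary store                        */
    smp_store_release(&ready, 1);    /* orders the data store before the flag */

    /* Consumer */
    while (!smp_load_acquire(&ready))
            cpu_relax();
    consume(data);                   /* guaranteed to observe the data store  */

On ARMv6K/v7 both macros fall back to smp_mb() around a plain access, which is stronger than a bare acquire/release but preserves the required ordering.
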
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index a3264c900932..afb9cafd3786 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
-#define EM_ARM 40
-
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..57bb21de7da2 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
ctrl_base = ARM_BASE_BCR;
val_base = ARM_BASE_BVR;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
ctrl_base = ARM_BASE_WCR;
val_base = ARM_BASE_WVR;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
/* Breakpoint */
base = ARM_BASE_BCR;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
base = ARM_BASE_WCR;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
/* The exception entry code places the amended lr in the PC. */
addr = regs->ARM_pc;
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f34003f..a7b621ece23d 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
static void __kprobes
@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
continue;
if (ri->rp && ri->rp->handler) {
- __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
- __get_cpu_var(current_kprobe) = NULL;
+ __this_cpu_write(current_kprobe, NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index b22fc22a4f9d..71f3c1ed35af 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -40,7 +40,7 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..6f1f13784bdd 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
- return &__get_cpu_var(cpu_hw_events);
+ return this_cpu_ptr(&cpu_hw_events);
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ef1703b9587b..28bf2cf781a7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -65,7 +65,7 @@ static bool vgic_present;
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
- __get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+ __this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
/**
@@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
BUG_ON(preemptible());
- return __get_cpu_var(kvm_arm_running_vcpu);
+ return __this_cpu_read(kvm_arm_running_vcpu);
}
/**
@@ -809,9 +809,9 @@ static void cpu_init_hyp_mode(void *dummy)
/* Switch from the HYP stub to our own HYP init vector */
__hyp_set_vectors(kvm_get_idmap_vector());
- boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr();
- pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
- stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+ boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+ pgd_ptr = kvm_mmu_get_httbr();
+ stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)__kvm_hyp_vector;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b0a82cca7697..78a39215b089 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -76,14 +76,6 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
-{
- phys_initrd_start = start;
- phys_initrd_size = end - start;
-}
-#endif /* CONFIG_OF_FLATTREE */
-
/*
* This keeps memory configuration data used by a couple memory
* initialization functions, as well as show_mem() for the skipping
@@ -349,6 +341,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
memblock_reserve(__pa(_stext), ALIGN(_end - _stext, PMD_SIZE));
#endif
#ifdef CONFIG_BLK_DEV_INITRD
+ /* FDT scan will populate initrd_start */
+ if (initrd_start && !phys_initrd_size) {
+ phys_initrd_start = __virt_to_phys(initrd_start);
+ phys_initrd_size = initrd_end - initrd_start;
+ }
+ initrd_start = initrd_end = 0;
if (phys_initrd_size &&
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 55bd4dcf4b2f..056a3f9fcb38 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2,6 +2,8 @@ config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
@@ -11,25 +13,32 @@ config ARM64
select CLONE_BACKWARDS
# select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS
select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
select GENERIC_PCI_IOMAP
+ select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -130,6 +139,8 @@ config MULTI_IRQ_HANDLER
bool
help
Allow each machine to specify it's own IRQ handler at run time.
+config KERNEL_MODE_NEON
+ def_bool y
source "init/Kconfig"
@@ -194,9 +205,13 @@ config ARM64_64K_PAGES
look-up. AArch32 emulation is not available when this feature
is enabled.
+config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+
config SMP
bool "Symmetric Multi-Processing"
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If
you say N here, the kernel will run on single and
@@ -212,7 +227,8 @@ config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4"
+ # These have to remain sorted largest to smallest
+ default "8"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 8b6b64741390..c68d8cb8985d 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,9 +20,15 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+KBUILD_CPPFLAGS += -mbig-endian
+AS += -EB
+LD += -EB
+else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
+endif
# For mach-xxx/mach/{io.h|irqs.h}
KBUILD_CPPFLAGS += -Iarch/arm/mach-tegra/include
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 519c4b2c0687..4a060906809d 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -224,7 +224,7 @@
virtio_block@0130000 {
compatible = "virtio,mmio";
- reg = <0x130000 0x1000>;
+ reg = <0x130000 0x200>;
interrupts = <42>;
};
};
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index b45e5f39f577..2f2ecd217363 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -183,6 +183,12 @@
clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
clock-names = "clcdclk", "apb_pclk";
};
+
+ virtio_block@0130000 {
+ compatible = "virtio,mmio";
+ reg = <0x130000 0x200>;
+ interrupts = <42>;
+ };
};
v2m_fixed_3v3: fixedregulator@0 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8d9696adb440..7e9877d5b7ca 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -19,13 +18,15 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_SMP=y
-CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_PREEMPT=y
+CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -41,14 +42,17 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
-# CONFIG_BLK_DEV is not set
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
@@ -58,16 +62,23 @@ CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_FUSE_FS=y
@@ -86,3 +97,5 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_BLK=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d199f2..626d4a92521f 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -50,3 +50,5 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 98abd476992d..9400596a0f39 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -26,7 +26,13 @@
#include <clocksource/arm_arch_timer.h>
-static inline void arch_timer_reg_write(int access, int reg, u32 val)
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code.
+ */
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -36,8 +42,6 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -47,17 +51,14 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
isb();
}
-static inline u32 arch_timer_reg_read(int access, int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val;
@@ -69,8 +70,6 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -80,11 +79,7 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
return val;
@@ -97,19 +92,49 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline void arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
- /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 0));
+ return cntkctl;
+}
- /* Enable user access to the virtual counter and frequency. */
- cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
+static inline void arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to the timers and the physical counter */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+
+ /* Enable user access to the virtual counter */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+ compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 92b8f09b94cc..ff16f977a12b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -131,3 +131,34 @@ lr .req x30 // link register
.align 7
b \label
.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ .macro regs_to_64, rd, lbits, hbits
+#else
+ .macro regs_to_64, rd, hbits, lbits
+#endif
+ orr \rd, \lbits, \hbits, lsl #32
+ .endm
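
[Editorial note: in C terms the regs_to_64 macro above performs, illustratively,]

    u64 val = ((u64)hi << 32) | lo;    /* hi/lo are the two 32-bit halves */

[and, as the comment notes, its argument order is swapped under CONFIG_CPU_BIG_ENDIAN so the same call sites work for either endianness.]
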
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c2d060edd85d..735e3317ba02 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -118,8 +118,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +127,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -150,8 +150,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -160,14 +159,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -176,34 +176,23 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long tmp;
int oldval;
+ smp_mb();
+
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long tmp, tmp2;
-
- asm volatile("// atomic_clear_mask\n"
-"1: ldxr %0, %2\n"
-" bic %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
- : "Ir" (mask)
- : "cc");
-}
-
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -251,8 +240,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -261,14 +249,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -283,8 +272,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -293,14 +281,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -309,17 +298,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
long oldval;
unsigned long res;
+ smp_mb();
+
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -331,11 +323,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
+" dmb ish\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index e3dc7a610b9f..9269edba2c90 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dsb(opt) asm volatile("dsb sy" : : : "memory")
#define mb() dsb()
#define rmb() asm volatile("dsb ld" : : : "memory")
@@ -35,10 +35,60 @@
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#else
+
#define smp_mb() asm volatile("dmb ish" : : : "memory")
#define smp_rmb() asm volatile("dmb ishld" : : : "memory")
#define smp_wmb() asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ } \
+ ___p1; \
+})
+
#endif
#define dmb() asm volatile("dmb ishld" : : : "memory")
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e73a38..57c0fa7bf711 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, %2\n"
+ "1: ldxrb %w0, %2\n"
" stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, %2\n"
+ "1: ldxrh %w0, %2\n"
" stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, %2\n"
+ "1: ldxr %w0, %2\n"
" stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, %2\n"
+ "1: ldxr %0, %2\n"
" stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
BUILD_BUG();
}
+ smp_mb();
return ret;
}
@@ -158,19 +159,27 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
return ret;
}
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
+ __ret; \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+
#endif /* __ASM_CMPXCHG_H */
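
[Editorial sketch, not from this patch: the reworked cmpxchg() is typically used in a retry loop; inc_below() is an illustrative helper name.]

    /* Lock-free bounded increment; returns false if *v is already at max. */
    static inline bool inc_below(unsigned long *v, unsigned long max)
    {
            unsigned long old, new;

            do {
                    old = ACCESS_ONCE(*v);
                    if (old >= max)
                            return false;
                    new = old + 1;
            } while (cmpxchg(v, old, new) != old);

            return true;
    }

cmpxchg() routes through __cmpxchg_mb and therefore acts as a full barrier, while cmpxchg_local() and the new cmpxchg64_relaxed() map to the unordered __cmpxchg for callers that do not need ordering.
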
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7058eec269ab..e71f81fe127a 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,7 +26,11 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
+#ifdef __AARCH64EB__
+#define COMPAT_UTS_MACHINE "armv8b\0\0"
+#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -73,13 +77,23 @@ struct compat_timeval {
};
struct compat_stat {
+#ifdef __AARCH64EB__
+ short st_dev;
+ short __pad1;
+#else
compat_dev_t st_dev;
+#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
+#ifdef __AARCH64EB__
+ short st_rdev;
+ short __pad2;
+#else
compat_dev_t st_rdev;
+#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index dec7e05c65ee..1f5eae854a11 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -16,16 +16,6 @@
#ifndef __ASM_CPUTYPE_H
#define __ASM_CPUTYPE_H
-#define ID_MIDR_EL1 "midr_el1"
-#define ID_MPIDR_EL1 "mpidr_el1"
-#define ID_CTR_EL0 "ctr_el0"
-
-#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
-#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1"
-#define ID_AA64AFR0_EL1 "id_aa64afr0_el1"
-#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
-#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
-
#define INVALID_HWID ULONG_MAX
#define MPIDR_HWID_BITMASK 0xff00ffffff
@@ -42,7 +32,7 @@
#define read_cpuid(reg) ({ \
u64 __val; \
- asm("mrs %0, " reg : "=r" (__val)); \
+ asm("mrs %0, " #reg : "=r" (__val)); \
__val; \
})
@@ -61,12 +51,12 @@
*/
static inline u32 __attribute_const__ read_cpuid_id(void)
{
- return read_cpuid(ID_MIDR_EL1);
+ return read_cpuid(MIDR_EL1);
}
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
- return read_cpuid(ID_MPIDR_EL1);
+ return read_cpuid(MPIDR_EL1);
}
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -81,7 +71,7 @@ static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
- return read_cpuid(ID_CTR_EL0);
+ return read_cpuid(CTR_EL0);
}
#endif /* __ASSEMBLY__ */
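
[Editorial note: with the change above, read_cpuid() stringizes its argument, so callers pass the system register as a bare token rather than via the removed ID_* string macros. Illustrative expansion only:]

    u64 midr = read_cpuid(MIDR_EL1);
    /* expands to roughly: asm("mrs %0, MIDR_EL1" : "=r" (__val)); */
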
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c68c09..62314791570c 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -62,6 +62,27 @@ struct task_struct;
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+#define DBG_HOOK_HANDLED 0
+#define DBG_HOOK_ERROR 1
+
+struct step_hook {
+ struct list_head node;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_step_hook(struct step_hook *hook);
+void unregister_step_hook(struct step_hook *hook);
+
+struct break_hook {
+ struct list_head node;
+ u32 esr_val;
+ u32 esr_mask;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_break_hook(struct break_hook *hook);
+void unregister_break_hook(struct break_hook *hook);
+
u8 debug_monitors_arch(void);
void enable_debug_monitors(enum debug_el el);
@@ -83,14 +104,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
}
#endif
-#ifdef CONFIG_COMPAT
int aarch32_break_handler(struct pt_regs *regs);
-#else
-static int aarch32_break_handler(struct pt_regs *regs)
-{
- return -EFAULT;
-}
-#endif
#endif /* __ASSEMBLY */
#endif /* __KERNEL__ */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index fe82b8376faf..a72b04b4f8eb 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,13 +24,14 @@
#include <asm-generic/dma-coherent.h>
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#define DMA_ERROR_CODE (~0)
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_range(addr, PAGE_SIZE)
#define FLUSH_DCACHE_AREA __flush_dcache_area
extern struct dma_map_ops arm_dma_ops;
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fe32c0e4ac01..01d3aab64b79 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -33,8 +33,6 @@ typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpsimd_state elf_fpregset_t;
-#define EM_AARCH64 183
-
/*
* AArch64 static relocation types.
*/
@@ -92,11 +90,24 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
+#ifdef __AARCH64EB__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
#define ELF_ARCH EM_AARCH64
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
#define ELF_PLATFORM_SIZE 16
+#ifdef __AARCH64EB__
+#define ELF_PLATFORM ("aarch64_be")
+#else
#define ELF_PLATFORM ("aarch64")
+#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -151,8 +162,12 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
-#define EM_ARM 40
+
+#ifdef __AARCH64EB__
+#define COMPAT_ELF_PLATFORM ("v8b")
+#else
#define COMPAT_ELF_PLATFORM ("v8l")
+#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c582fa316366..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,12 +24,14 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
-"1: ldaxr %w1, %2\n" \
+"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
+" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
+" .align 2\n" \
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
@@ -39,7 +41,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -110,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
"2: stlxr %w3, %w5, %2\n"
" cbnz %w3, 1b\n"
+" dmb ish\n"
"3:\n"
" .pushsection .fixup,\"ax\"\n"
"4: mov %w0, %w6\n"
@@ -126,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
- : "cc", "memory");
+ : "memory");
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index ae4801d77514..0be67821f9ce 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 5
+#define NR_IPI 6
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6d4482fa35bc..6cddbb0c9f54 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#ifndef __ASSEMBLY__
/*
@@ -37,12 +38,12 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
- COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
-extern unsigned int elf_hwcap;
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+extern unsigned int compat_elf_hwcap;
+#endif
+
+extern unsigned long elf_hwcap;
#endif
#endif
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
new file mode 100644
index 000000000000..c44ad39ed310
--- /dev/null
+++ b/arch/arm64/include/asm/insn.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_INSN_H
+#define __ASM_INSN_H
+#include <linux/types.h>
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+ * AArch64 main encoding table
+ * Bit position
+ * 28 27 26 25 Encoding Group
+ * 0 0 - - Unallocated
+ * 1 0 0 - Data processing, immediate
+ * 1 0 1 - Branch, exception generation and system instructions
+ * - 1 - 0 Loads and stores
+ * - 1 0 1 Data processing - register
+ * 0 1 1 1 Data processing - SIMD and floating point
+ * 1 1 1 1 Data processing - SIMD and floating point
+ * "-" means "don't care"
+ */
+enum aarch64_insn_encoding_class {
+ AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
+ AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
+ AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
+ AARCH64_INSN_CLS_LDST, /* Loads and stores */
+ AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and
+ * system instructions */
+};
+
+enum aarch64_insn_hint_op {
+ AARCH64_INSN_HINT_NOP = 0x0 << 5,
+ AARCH64_INSN_HINT_YIELD = 0x1 << 5,
+ AARCH64_INSN_HINT_WFE = 0x2 << 5,
+ AARCH64_INSN_HINT_WFI = 0x3 << 5,
+ AARCH64_INSN_HINT_SEV = 0x4 << 5,
+ AARCH64_INSN_HINT_SEVL = 0x5 << 5,
+};
+
+enum aarch64_insn_imm_type {
+ AARCH64_INSN_IMM_ADR,
+ AARCH64_INSN_IMM_26,
+ AARCH64_INSN_IMM_19,
+ AARCH64_INSN_IMM_16,
+ AARCH64_INSN_IMM_14,
+ AARCH64_INSN_IMM_12,
+ AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_MAX
+};
+
+enum aarch64_insn_branch_type {
+ AARCH64_INSN_BRANCH_NOLINK,
+ AARCH64_INSN_BRANCH_LINK,
+};
+
+#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
+static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
+{ return (code & (mask)) == (val); } \
+static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
+{ return (val); }
+
+__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
+__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
+__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
+__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
+__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+
+#undef __AARCH64_INSN_FUNCS
+
+bool aarch64_insn_is_nop(u32 insn);
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm);
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_nop(void);
+
+bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_INSN_H */
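The declarations above are enough to classify an opcode without open-coding bit tests. Below is a minimal sketch (not part of the patch; the function name is hypothetical) showing how a caller might combine aarch64_get_insn_class() with the predicates generated by __AARCH64_INSN_FUNCS:

#include <linux/printk.h>
#include <asm/insn.h>

/* Hypothetical helper: report branch/system opcodes we recognise. */
static void sketch_classify(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		pr_info("branch: %08x\n", insn);
	else if (aarch64_insn_is_nop(insn))
		pr_info("nop:    %08x\n", insn);
}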
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index a3290c56e0f8..e66f858f38d5 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -226,14 +226,17 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
#define iounmap __iounmap
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
new file mode 100644
index 000000000000..076a1c714049
--- /dev/null
+++ b/arch/arm64/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/include/asm/jump_label.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASM_JUMP_LABEL_H */
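For context, this is how a static key of this era is typically consumed: the NOP emitted by arch_static_branch() above is what the patching code added later in this series (arch/arm64/kernel/jump_label.c) rewrites into a branch. A sketch only; the key and function names are hypothetical:

#include <linux/jump_label.h>

extern void my_slow_feature(void);		/* hypothetical slow path */

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

void my_hot_path(void)
{
	/* Compiles to the single NOP from arch_static_branch(); taken
	 * only after the key is enabled and the NOP is patched. */
	if (static_key_false(&my_feature_key))
		my_slow_feature();
}

void my_feature_enable(void)
{
	static_key_slow_inc(&my_feature_key);	/* NOP -> B at runtime */
}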
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 76131f7e06b3..3c3e92141ad5 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -37,18 +37,23 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image.
+ * PAGE_OFFSET - the virtual address of the start of the kernel image, i.e.
+ * the top 2^(VA_BITS - 1) bytes of the virtual address space.
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#define PAGE_OFFSET UL(0xffffffc000000000)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define VA_BITS (42)
+#else
+#define VA_BITS (39)
+#endif
+#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
-#define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
@@ -152,8 +157,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
- ((void *)(kaddr) < (void *)high_memory))
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#endif
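As a worked check of the new formulas (an illustrative sketch, not part of the patch): with 4KB pages VA_BITS is 39, so PAGE_OFFSET = ~0UL << 38 = 0xffffffc000000000, reproducing the old hard-coded constant; with 64KB pages VA_BITS is 42, giving PAGE_OFFSET = 0xfffffe0000000000. Compile-time assertions make the arithmetic explicit:

#include <linux/bug.h>
#include <linux/init.h>
#include <asm/memory.h>

/* Hypothetical sanity check of the VA layout derived above. */
static void __init sketch_check_layout(void)
{
#ifndef CONFIG_ARM64_64K_PAGES
	/* 4KB pages: 39-bit VA space, old constant reproduced exactly */
	BUILD_BUG_ON(PAGE_OFFSET != 0xffffffc000000000UL);
#else
	/* 64KB pages: 42-bit VA space */
	BUILD_BUG_ON(PAGE_OFFSET != 0xfffffe0000000000UL);
#endif
}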
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644
index 000000000000..b0cc58a97780
--- /dev/null
+++ b/arch/arm64/include/asm/neon.h
@@ -0,0 +1,14 @@
+/*
+ * linux/arch/arm64/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define cpu_has_neon() (1)
+
+void kernel_neon_begin(void);
+void kernel_neon_end(void);
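A sketch of how a kernel-mode NEON user is expected to bracket its SIMD code with the hooks declared above (implemented later in this series in arch/arm64/kernel/fpsimd.c); the functions named here are hypothetical:

#include <linux/types.h>
#include <asm/neon.h>

extern void my_neon_memcpy(void *dst, const void *src, size_t len);

/* Hypothetical: run a NEON-optimised routine from process context. */
void sketch_do_simd_work(void *dst, const void *src, size_t len)
{
	kernel_neon_begin();		/* saves the task's FPSIMD state */
	my_neon_memcpy(dst, src, len);
	kernel_neon_end();		/* restores the task's FPSIMD state */
}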
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index f214069ec5d5..9bea6e74a001 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,9 +63,12 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
struct page *pte;
pte = alloc_pages(PGALLOC_GFP, 0);
- if (pte)
- pgtable_page_ctor(pte);
-
+ if (!pte)
+ return NULL;
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
return pte;
}
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 0a8ed3f94e93..2593b490c56a 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 512GB and therefore we only use 1024 entries in the PGD.
+ * 4TB in the 64KB page configuration.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD 8192
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b97638e98bad..7998ffed3644 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -47,6 +47,10 @@
/*
* Section
*/
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 625c7d69079b..ed363b00b6bd 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,10 +25,11 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+#define PTE_WRITE (_AT(pteval_t, 1) << 57)
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* Some extra PTE bits definition.
@@ -40,7 +41,7 @@
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START UL(0xffffff8000000000)
+#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
@@ -148,26 +149,57 @@ extern unsigned long zero_page_mask;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= PTE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= PTE_DIRTY;
+ return pte;
+}
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_AF;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= PTE_AF;
+ return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= PTE_SPECIAL;
+ return pte;
+}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
@@ -182,8 +214,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_valid_user(pte)) {
if (pte_exec(pte))
__sync_icache_dcache(pte, addr);
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
+ if (pte_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+ pte_val(pte) |= PTE_RDONLY;
}
set_pte(ptep, pte);
@@ -287,7 +321,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
@@ -303,15 +337,17 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
* bits 3-8: swap type
- * bits 9-63: swap offset
+ * bits 9-57: swap offset
*/
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
+#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
@@ -327,13 +363,13 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* Encode and decode a file entry:
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
- * bits 3-63: file offset / PAGE_SIZE
+ * bits 3-57: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 61
+#define PTE_FILE_MAX_BITS 55
extern int kern_addr_valid(unsigned long addr);
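With PTE_PROT_NONE moved up to bit 58, the swap offset is confined to bits 9-57 and must be masked on decode so the high software bits never leak into it. A hedged round-trip check of the encoding above (names hypothetical, not part of the patch):

#include <linux/bug.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>

/* Hypothetical: a (type, offset) pair survives the encode/decode cycle
 * as long as the offset fits in the 49-bit field defined above. */
static void sketch_swp_roundtrip(void)
{
	swp_entry_t e = __swp_entry(3, 0x12345UL);

	BUG_ON(__swp_type(e) != 3);
	BUG_ON(__swp_offset(e) != 0x12345UL);
}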
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 702c0b2f54d3..1dc42f699073 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -127,6 +127,11 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
+
+#ifdef __AARCH64EB__
+ regs->pstate |= COMPAT_PSR_E_BIT;
+#endif
+
regs->compat_sp = sp;
}
#endif
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index c59543e39b2d..041d286a9a3b 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,7 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index f5022507c89d..b14d7abdfd4e 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -252,10 +252,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
: "memory");
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner == lock.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- arch_spinlock_t lockval = ACCESS_ONCE(*lock);
- return lockval.owner != lockval.next;
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -288,7 +292,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" cbnz %w0, 2b\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -302,7 +306,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -343,7 +347,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -357,7 +361,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -372,7 +376,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 24b7d25ca9c8..3b2b3c5728ad 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,8 +36,13 @@ typedef volatile u32 arch_rwlock_t;
#define TICKET_SHIFT 16
typedef struct {
+#ifdef __AARCH64EB__
+ u16 next;
+ u16 owner;
+#else
u16 owner;
u16 next;
+#endif
} __aligned(4) arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 01088fb9bcd1..21e1979d48bd 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -100,6 +100,7 @@ static inline void set_fs(mm_segment_t fs)
})
#define access_ok(type, addr, size) __range_ok(addr, size)
+#define user_addr_max get_fs
/*
* The "__xxx" versions of the user access functions do not verify the address
@@ -240,9 +241,6 @@ extern unsigned long __must_check __copy_to_user(void __user *to, const void *fr
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -276,24 +274,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- res = __strncpy_from_user(dst, src, count);
- return res;
-}
-
-#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
- unsigned long res = 0;
-
- if (__addr_ok(s))
- res = __strnlen_user(s, n);
-
- return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
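The arch-private __strncpy_from_user/__strnlen_user routines are dropped in favour of the generic word-at-a-time implementations (presumably selected elsewhere in this series via the Kconfig change). Callers keep the usual contract; a hedged usage sketch, not part of the patch:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical driver helper copying a user-supplied name. */
static long sketch_get_name(char *buf, const char __user *uname, long len)
{
	long n = strncpy_from_user(buf, uname, len);

	if (n < 0)
		return n;		/* -EFAULT on a faulting user pointer */
	if (n == len)
		return -ENAMETOOLONG;	/* no NUL terminator within len bytes */
	return 0;			/* buf now holds a NUL-terminated copy */
}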
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 26e310c54344..130e2be952cf 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,7 +18,8 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL2 (0x0e12b007)
+#define BOOT_CPU_MODE_EL1 (0xe11)
+#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
#include <asm/cacheflush.h>
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..aab5bf09e9d9
--- /dev/null
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_WORD_AT_A_TIME_H
+#define __ASM_WORD_AT_A_TIME_H
+
+#ifndef __AARCH64EB__
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __AARCH64EB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, %3\n"
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x7\n"
+ " bic %2, %2, #0x7\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+#ifndef __AARCH64EB__
+ " lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Q" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* __ASM_WORD_AT_A_TIME_H */
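A sketch of the intended calling sequence for the little-endian helpers above, in the style of the generic strnlen/dcache users (the function name is hypothetical): has_zero() flags a word containing a NUL byte, prep_zero_mask()/create_zero_mask() turn that into a byte mask, and find_zero() gives the byte index.

#include <linux/kernel.h>
#include <asm/word-at-a-time.h>

/* Hypothetical: length of a NUL-terminated kernel string, eight bytes
 * at a time. load_unaligned_zeropad() keeps page-crossing reads safe. */
static size_t sketch_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	size_t len = 0;

	for (;;) {
		unsigned long data, mask;

		data = load_unaligned_zeropad(s + len);
		if (has_zero(data, &mask, &constants)) {
			mask = prep_zero_mask(data, mask, &constants);
			mask = create_zero_mask(mask);
			return len + find_zero(mask);
		}
		len += sizeof(unsigned long);
	}
}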
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index 2b92046aafc5..dc19e9537f0d 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,6 +16,10 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
+#ifdef __AARCH64EB__
+#include <linux/byteorder/big_endian.h>
+#else
#include <linux/byteorder/little_endian.h>
+#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea497578b87..73cf0f54d57c 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,11 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
-
+#define HWCAP_EVTSTRM (1 << 2)
+#define HWCAP_AES (1 << 3)
+#define HWCAP_PMULL (1 << 4)
+#define HWCAP_SHA1 (1 << 5)
+#define HWCAP_SHA2 (1 << 6)
+#define HWCAP_CRC32 (1 << 7)
#endif /* _UAPI__ASM_HWCAP_H */
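These bits reach userspace through the ELF auxiliary vector, so a program would typically probe them roughly as below. This is a hedged userspace sketch, not part of the patch, and it assumes glibc's getauxval() (available from glibc 2.16):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_CRC32	(1 << 7)	/* mirrors the kernel definition above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_CRC32)
		printf("CRC32 instructions available\n");
	return 0;
}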
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 45b6a430d722..e6131baaedda 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,7 +9,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o opcodes.o
+ hyp-stub.o psci.o opcodes.o insn.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
@@ -23,6 +23,7 @@ arm64-obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
obj-$(CONFIG_PCI) += bios32.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
+arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f626d554..338b568cd8ae 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -29,16 +29,14 @@
#include <asm/checksum.h>
- /* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+ /* user mem (segment) */
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f78148f5d30c..e6dd3f38d4b9 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -28,7 +28,6 @@
#include <linux/uaccess.h>
#include <asm/debug-monitors.h>
-#include <asm/local.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
@@ -90,8 +89,8 @@ early_param("nodebugmon", early_debug_disable);
* Keep track of debug users on each core.
* The ref counts are per-cpu so we use a local_t type.
*/
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);
void enable_debug_monitors(enum debug_el el)
{
@@ -99,11 +98,11 @@ void enable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+ if (this_cpu_inc_return(mde_ref_count) == 1)
enable = DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+ this_cpu_inc_return(kde_ref_count) == 1)
enable |= DBG_MDSCR_KDE;
if (enable && debug_enabled) {
@@ -119,11 +118,11 @@ void disable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+ if (this_cpu_dec_return(mde_ref_count) == 0)
disable = ~DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+ this_cpu_dec_return(kde_ref_count) == 0)
disable &= ~DBG_MDSCR_KDE;
if (disable) {
@@ -226,6 +225,48 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
regs->pstate = spsr;
}
+/* EL1 Single Step Handler hooks */
+static LIST_HEAD(step_hook);
+DEFINE_RWLOCK(step_hook_lock);
+
+void register_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_add(&hook->node, &step_hook);
+ write_unlock(&step_hook_lock);
+}
+
+void unregister_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&step_hook_lock);
+}
+
+/*
+ * Call the registered single-step handlers.
+ * There is no syndrome information to identify the right handler, so we
+ * call each registered handler in turn until one of them handles the
+ * exception (returns DBG_HOOK_HANDLED).
+ */
+static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+ read_lock(&step_hook_lock);
+
+ list_for_each_entry(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+ read_unlock(&step_hook_lock);
+
+ return retval;
+}
+
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -253,7 +294,9 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
*/
user_rewind_single_step(current);
} else {
- /* TODO: route to KGDB */
+ if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
pr_warning("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
@@ -265,11 +308,53 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
return 0;
}
+/*
+ * The breakpoint handler is re-entrant: another breakpoint can be
+ * hit from within a breakpoint handler, especially with kprobes.
+ * Use reader/writer locks instead of a plain spinlock.
+ */
+static LIST_HEAD(break_hook);
+DEFINE_RWLOCK(break_hook_lock);
+
+void register_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_add(&hook->node, &break_hook);
+ write_unlock(&break_hook_lock);
+}
+
+void unregister_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&break_hook_lock);
+}
+
+static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct break_hook *hook;
+ int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+
+ read_lock(&break_hook_lock);
+ list_for_each_entry(hook, &break_hook, node)
+ if ((esr & hook->esr_mask) == hook->esr_val)
+ fn = hook->fn;
+ read_unlock(&break_hook_lock);
+
+ return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+}
+
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
siginfo_t info;
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
+ pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
+ (long)instruction_pointer(regs), esr);
+
if (!user_mode(regs))
return -EFAULT;
@@ -287,7 +372,8 @@ static int brk_handler(unsigned long addr, unsigned int esr,
int aarch32_break_handler(struct pt_regs *regs)
{
siginfo_t info;
- unsigned int instr;
+ u32 arm_instr;
+ u16 thumb_instr;
bool bp = false;
void __user *pc = (void __user *)instruction_pointer(regs);
@@ -296,18 +382,21 @@ int aarch32_break_handler(struct pt_regs *regs)
if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */
- get_user(instr, (u16 __user *)pc);
- if (instr == AARCH32_BREAK_THUMB2_LO) {
+ get_user(thumb_instr, (u16 __user *)pc);
+ thumb_instr = le16_to_cpu(thumb_instr);
+ if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
/* get second half of 32-bit Thumb-2 instruction */
- get_user(instr, (u16 __user *)(pc + 2));
- bp = instr == AARCH32_BREAK_THUMB2_HI;
+ get_user(thumb_instr, (u16 __user *)(pc + 2));
+ thumb_instr = le16_to_cpu(thumb_instr);
+ bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
} else {
- bp = instr == AARCH32_BREAK_THUMB;
+ bp = thumb_instr == AARCH32_BREAK_THUMB;
}
} else {
/* 32-bit ARM instruction */
- get_user(instr, (u32 __user *)pc);
- bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+ get_user(arm_instr, (u32 __user *)pc);
+ arm_instr = le32_to_cpu(arm_instr);
+ bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
}
if (!bp)
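The new break/step hook lists give subsystems such as kprobes or kgdb a way to claim BRK and single-step exceptions. A hedged client sketch follows; it assumes the struct break_hook layout (node, esr_mask, esr_val, fn) declared in asm/debug-monitors.h by this series, and the ESR value and names are hypothetical:

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/debug-monitors.h>
#include <asm/ptrace.h>

static int my_brk_fn(struct pt_regs *regs, unsigned int esr)
{
	pr_info("claimed brk at %p\n", (void *)instruction_pointer(regs));
	regs->pc += 4;			/* step over the BRK instruction */
	return DBG_HOOK_HANDLED;
}

static struct break_hook my_brk_hook = {
	.esr_mask	= 0xffffffff,	/* hypothetical: exact ESR match */
	.esr_val	= 0xf2000123,	/* hypothetical BRK immediate */
	.fn		= my_brk_fn,
};

static int __init my_brk_init(void)
{
	register_break_hook(&my_brk_hook);
	return 0;
}
late_initcall(my_brk_init);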
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 58f0b119d00c..f4805612757b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -289,6 +289,8 @@ el1_dbg:
/*
* Debug exception handling
*/
+ cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
mov x2, sp // struct pt_regs
@@ -312,14 +314,14 @@ el1_irq:
#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x0, x24, #1 // increment it
- str x0, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w0, w24, #1 // increment it
+ str w0, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- str x24, [tsk, #TI_PREEMPT] // restore preempt count
- cbnz x24, 1f // preempt count != 0
+ str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
@@ -492,6 +494,8 @@ el0_undef:
* Undefined instruction
*/
mov x0, sp
+ // enable interrupts before calling the main handler
+ enable_irq
b do_undefinstr
el0_dbg:
/*
@@ -522,15 +526,15 @@ el0_irq_naked:
#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x23, x24, #1 // increment it
- str x23, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w23, w24, #1 // increment it
+ str w23, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- ldr x0, [tsk, #TI_PREEMPT]
- str x24, [tsk, #TI_PREEMPT]
- cmp x0, x23
+ ldr w0, [tsk, #TI_PREEMPT]
+ str w24, [tsk, #TI_PREEMPT]
+ cmp w0, w23
b.eq 1f
mov x1, #0
str x1, [x1] // BUG
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 2fa308e4a1fa..4aef42a04bdc 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,10 +17,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/hardirq.h>
#include <asm/fpsimd.h>
#include <asm/cputype.h>
@@ -85,6 +87,66 @@ void fpsimd_flush_thread(void)
preempt_enable();
}
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+ /* Avoid using the NEON in interrupt context */
+ BUG_ON(in_interrupt());
+ preempt_disable();
+
+ if (current->mm)
+ fpsimd_save_state(&current->thread.fpsimd_state);
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+ if (current->mm)
+ fpsimd_load_state(&current->thread.fpsimd_state);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
+#ifdef CONFIG_CPU_PM
+static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ if (current->mm)
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ break;
+ case CPU_PM_EXIT:
+ if (current->mm)
+ fpsimd_load_state(&current->thread.fpsimd_state);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_pm_notifier_block = {
+ .notifier_call = fpsimd_cpu_pm_notifier,
+};
+
+static void fpsimd_pm_init(void)
+{
+ cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
+}
+
+#else
+static inline void fpsimd_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
/*
* FP/SIMD support code initialisation.
*/
@@ -103,6 +165,8 @@ static int __init fpsimd_init(void)
else
elf_hwcap |= HWCAP_ASIMD;
+ fpsimd_pm_init();
+
return 0;
}
late_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae49e729..a8073ff9a677 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -112,11 +112,20 @@
.quad TEXT_OFFSET // Image load offset from start of RAM
.quad 0 // reserved
.quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .byte 0x41 // Magic number, "ARM\x64"
+ .byte 0x52
+ .byte 0x4d
+ .byte 0x64
+ .word 0 // reserved
ENTRY(stext)
mov x21, x0 // x21=FDT
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl el2_setup // Drop to EL1
+ bl set_cpu_boot_mode_flag
mrs x22, midr_el1 // x22=cpuid
mov x0, x22
bl lookup_processor_type
@@ -142,21 +151,30 @@ ENDPROC(stext)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
+ *
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20,
+ * depending on whether the CPU booted in EL1 or EL2.
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #PSR_MODE_EL2t
ccmp x0, #PSR_MODE_EL2h, #0x4, ne
- ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x0, x0, x28
- b.eq 1f
- str wzr, [x0] // Remember we don't have EL2...
+ b.ne 1f
+ mrs x0, sctlr_el2
+CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
+CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ msr sctlr_el2, x0
+ b 2f
+1: mrs x0, sctlr_el1
+CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
+CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ msr sctlr_el1, x0
+ mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
+ isb
ret
/* Hyp configuration. */
-1: ldr w1, =BOOT_CPU_MODE_EL2
- str w1, [x0, #4] // This CPU has EL2
- mov x0, #(1 << 31) // 64-bit EL1
+2: mov x0, #(1 << 31) // 64-bit EL1
msr hcr_el2, x0
/* Generic timers. */
@@ -173,7 +191,8 @@ ENTRY(el2_setup)
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
- movk x0, #0x30d0, lsl #16
+CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
+CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -196,10 +215,25 @@ ENTRY(el2_setup)
PSR_MODE_EL1h)
msr spsr_el2, x0
msr elr_el2, lr
+ mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
ENDPROC(el2_setup)
/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in x20. See arch/arm64/include/asm/virt.h for more info.
+ */
+ENTRY(set_cpu_boot_mode_flag)
+ ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
+ add x1, x1, x28
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1: str w20, [x1] // This CPU has booted in EL1
+ ret
+ENDPROC(set_cpu_boot_mode_flag)
+
+/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
@@ -227,8 +261,9 @@ ENTRY(__boot_cpu_mode)
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
- bl __calc_phys_offset // x24=phys offset
- bl el2_setup // Drop to EL1
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
@@ -242,7 +277,17 @@ pen: ldr x4, [x3]
wfe
b pen
ENDPROC(secondary_holding_pen)
- .popsection
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+ENTRY(secondary_entry)
+ bl el2_setup // Drop to EL1
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
+ b secondary_startup
+ENDPROC(secondary_entry)
ENTRY(secondary_startup)
/*
@@ -438,8 +483,6 @@ ENDPROC(__create_page_tables)
.type __switch_data, %object
__switch_data:
.quad __mmap_switched
- .quad __data_loc // x4
- .quad _data // x5
.quad __bss_start // x6
.quad _end // x7
.quad processor_id // x4
@@ -454,15 +497,7 @@ __switch_data:
__mmap_switched:
adr x3, __switch_data + 8
- ldp x4, x5, [x3], #16
ldp x6, x7, [x3], #16
- cmp x4, x5 // Copy data segment if needed
-1: ccmp x5, x6, #4, ne
- b.eq 2f
- ldr x16, [x4], #8
- str x16, [x5], #8
- b 1b
-2:
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 59afa21feca0..a3c8176c8782 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -238,14 +238,14 @@ static int hw_breakpoint_control(struct perf_event *bp,
/* Breakpoint */
ctrl_reg = AARCH64_DBG_REG_BCR;
val_reg = AARCH64_DBG_REG_BVR;
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
reg_enable = !debug_info->bps_disabled;
} else {
/* Watchpoint */
ctrl_reg = AARCH64_DBG_REG_WCR;
val_reg = AARCH64_DBG_REG_WVR;
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
reg_enable = !debug_info->wps_disabled;
}
@@ -292,9 +292,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
- return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
-}
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ struct perf_event **slot, **slots;
+ int i, max_slots, base;
+
+ if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ base = AARCH64_DBG_REG_BCR;
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = core_num_brps;
+ } else {
+ /* Watchpoint */
+ base = AARCH64_DBG_REG_WCR;
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = core_num_wrps;
+ }
+
+ /* Remove the breakpoint. */
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ if (*slot == bp) {
+ *slot = NULL;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
+ return -EINVAL;
+
+ /* Reset the control register. */
+ write_wb_reg(base, i, 0);
+
+ return 0;
+}
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
@@ -546,11 +575,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
switch (reg) {
case AARCH64_DBG_REG_BCR:
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
break;
case AARCH64_DBG_REG_WCR:
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
break;
default:
@@ -587,7 +616,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
struct debug_info *debug_info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
addr = instruction_pointer(regs);
debug_info = &current->thread.debug;
@@ -637,7 +666,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -664,7 +693,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
debug_info = &current->thread.debug;
for (i = 0; i < core_num_wrps; ++i) {
@@ -739,7 +768,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -763,7 +792,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
struct debug_info *debug_info = &current->thread.debug;
int handled_exception = 0, *kernel_step;
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
/*
* Called from single-step exception handler.
@@ -927,6 +956,14 @@ static inline void hw_breakpoint_pm_init(void)
}
#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
+#else
+static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+}
+#endif
+
/*
* One-time initialisation.
*/
@@ -953,7 +990,8 @@ static int __init arch_hw_breakpoint_init(void)
/* Register hotplug notifier. */
register_cpu_notifier(&hw_breakpoint_reset_nb);
- hw_breakpoint_pm_init();
+ /* Register cpu_suspend hw breakpoint restore hook */
+ cpu_suspend_set_dbg_restorer(hw_breakpoint_restore);
return 0;
}
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
new file mode 100644
index 000000000000..92f36835486b
--- /dev/null
+++ b/arch/arm64/kernel/insn.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+static int aarch64_insn_encoding_class[] = {
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+};
+
+enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
+{
+ return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
+}
+
+/* NOP is an alias of HINT */
+bool __kprobes aarch64_insn_is_nop(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_YIELD:
+ case AARCH64_INSN_HINT_WFE:
+ case AARCH64_INSN_HINT_WFI:
+ case AARCH64_INSN_HINT_SEV:
+ case AARCH64_INSN_HINT_SEVL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ u32 val;
+
+ ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ insn = cpu_to_le32(insn);
+ return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+}
+
+static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
+{
+ if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
+ return false;
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_svc(insn) ||
+ aarch64_insn_is_hvc(insn) ||
+ aarch64_insn_is_smc(insn) ||
+ aarch64_insn_is_brk(insn) ||
+ aarch64_insn_is_nop(insn);
+}
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section B2.6.5 "Concurrent modification and execution of instructions":
+ * Concurrent modification and execution of instructions can lead to the
+ * resulting instruction performing any behavior that can be achieved by
+ * executing any sequence of instructions that can be executed from the
+ * same Exception level, except where the instruction before modification
+ * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
+ * or SMC instruction.
+ */
+bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
+{
+ return __aarch64_insn_hotpatch_safe(old_insn) &&
+ __aarch64_insn_hotpatch_safe(new_insn);
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /*
+ * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
+ * which ends with "dsb; isb" pair guaranteeing global
+ * visibility.
+ */
+ atomic_set(&pp->cpu_count, -1);
+ } else {
+ while (atomic_read(&pp->cpu_count) != -1)
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ int ret;
+ u32 insn;
+
+ /* Unsafe to patch multiple instructions without synchronization */
+ if (cnt == 1) {
+ ret = aarch64_insn_read(addrs[0], &insn);
+ if (ret)
+ return ret;
+
+ if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
+ /*
+ * ARMv8 architecture doesn't guarantee all CPUs see
+ * the new instruction after returning from function
+ * aarch64_insn_patch_text_nosync(). So send IPIs to
+ * all other CPUs to achieve instruction
+ * synchronization.
+ */
+ ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
+ kick_all_cpus_sync();
+ return ret;
+ }
+ }
+
+ return aarch64_insn_patch_text_sync(addrs, insns, cnt);
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, lomask, himask, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ lomask = 0x3;
+ himask = 0x7ffff;
+ immlo = imm & lomask;
+ imm >>= 2;
+ immhi = imm & himask;
+ imm = (immlo << 24) | (immhi);
+ mask = (lomask << 24) | (himask);
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9) - 1;
+ shift = 12;
+ break;
+ default:
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ /*
+ * PC: A 64-bit Program Counter holding the address of the current
+ * instruction. A64 instructions must be word-aligned.
+ */
+ BUG_ON((pc & 0x3) || (addr & 0x3));
+
+ /*
+ * B/BL support [-128M, 128M) offset
+ * ARM64 virtual address arrangement guarantees all kernel and module
+ * texts are within +/-128M.
+ */
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+
+ if (type == AARCH64_INSN_BRANCH_LINK)
+ insn = aarch64_insn_get_bl_value();
+ else
+ insn = aarch64_insn_get_b_value();
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
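Taken together, the helpers above support the pattern the jump label code below relies on: generate a replacement instruction, then patch it in with the appropriate synchronisation. A hedged sketch (names hypothetical):

#include <asm/insn.h>

/* Hypothetical: redirect the (word-aligned) instruction at 'site' to
 * branch to 'target'. For a single hotpatch-safe instruction,
 * aarch64_insn_patch_text() avoids stop_machine() and just kicks the
 * other CPUs for instruction synchronisation. */
static int sketch_patch_branch(void *site, void *target)
{
	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)site,
					       (unsigned long)target,
					       AARCH64_INSN_BRANCH_NOLINK);

	return aarch64_insn_patch_text(&site, &insn, 1);
}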
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
new file mode 100644
index 000000000000..263a166291fb
--- /dev/null
+++ b/arch/arm64/kernel/jump_label.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/kernel/jump_label.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ u32 insn;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn = aarch64_insn_gen_branch_imm(entry->code,
+ entry->target,
+ AARCH64_INSN_BRANCH_NOLINK);
+ } else {
+ insn = aarch64_insn_gen_nop();
+ }
+
+ if (is_static)
+ aarch64_insn_patch_text_nosync(addr, insn);
+ else
+ aarch64_insn_patch_text(&addr, &insn, 1);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb1d8bc..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
+
+#include <asm/unistd32.h>
+
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -35,33 +38,32 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xf57ff05f // dmb sy
.inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23f96 // strexdeq r3, r6, [r2]
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xf57ff05f // dmb sy
.inst 0xe1923f9f // 1: ldrex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823f91 // strexeq r3, r1, [r2]
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xeaffffef // b <__kuser_memory_barrier>
+ .inst 0xe12fff1e // bx lr
.align 5
__kuser_get_tls: // 0xffff0fe0
@@ -75,3 +77,42 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
+
+/*
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index ca0e3d55da99..1eb1cc955139 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,11 +25,15 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/insn.h>
+
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
__builtin_return_address(0));
}
@@ -94,25 +98,18 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
return 0;
}
-enum aarch64_imm_type {
- INSN_IMM_MOVNZ,
- INSN_IMM_MOVK,
- INSN_IMM_ADR,
- INSN_IMM_26,
- INSN_IMM_19,
- INSN_IMM_16,
- INSN_IMM_14,
- INSN_IMM_12,
- INSN_IMM_9,
-};
-
-static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u32 immlo, immhi, lomask, himask, mask;
- int shift;
+ u64 imm, limit = 0;
+ s64 sval;
+ u32 insn = le32_to_cpu(*(u32 *)place);
+
+ sval = do_reloc(op, place, val);
+ sval >>= lsb;
+ imm = sval & 0xffff;
- switch (type) {
- case INSN_IMM_MOVNZ:
+ if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
* For signed MOVW relocations, we have to manipulate the
* instruction encoding depending on whether or not the
@@ -131,70 +128,12 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
*/
imm = ~imm;
}
- case INSN_IMM_MOVK:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
- case INSN_IMM_26:
- mask = BIT(26) - 1;
- shift = 0;
- break;
- case INSN_IMM_19:
- mask = BIT(19) - 1;
- shift = 5;
- break;
- case INSN_IMM_16:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_14:
- mask = BIT(14) - 1;
- shift = 5;
- break;
- case INSN_IMM_12:
- mask = BIT(12) - 1;
- shift = 10;
- break;
- case INSN_IMM_9:
- mask = BIT(9) - 1;
- shift = 12;
- break;
- default:
- pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
- /* Update the immediate field. */
- insn &= ~(mask << shift);
- insn |= (imm & mask) << shift;
-
- return insn;
-}
-
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_imm_type imm_type)
-{
- u64 imm, limit = 0;
- s64 sval;
- u32 insn = *(u32 *)place;
-
- sval = do_reloc(op, place, val);
- sval >>= lsb;
- imm = sval & 0xffff;
-
/* Update the instruction with the new encoding. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/* Shift out the immediate field. */
sval >>= 16;
@@ -203,9 +142,9 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
* For unsigned immediates, the overflow check is straightforward.
* For signed immediates, the sign bit is actually the bit past the
* most significant bit of the field.
- * The INSN_IMM_16 immediate type is unsigned.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
*/
- if (imm_type != INSN_IMM_16) {
+ if (imm_type != AARCH64_INSN_IMM_16) {
sval++;
limit++;
}
@@ -218,11 +157,11 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
}
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, int len, enum aarch64_imm_type imm_type)
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
u64 imm, imm_mask;
s64 sval;
- u32 insn = *(u32 *)place;
+ u32 insn = le32_to_cpu(*(u32 *)place);
/* Calculate the relocation value. */
sval = do_reloc(op, place, val);
@@ -233,7 +172,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
imm = sval & imm_mask;
/* Update the instruction's immediate field. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/*
* Extract the upper value bits (including the sign bit) and
@@ -315,125 +255,125 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
/* Immediate instruction relocations. */
case R_AARCH64_LD_PREL_LO19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_ADR_PREL_LO21:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_TSTBR14:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
- INSN_IMM_14);
+ AARCH64_INSN_IMM_14);
break;
case R_AARCH64_CONDBR19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
- INSN_IMM_26);
+ AARCH64_INSN_IMM_26);
break;
default:
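For reference, the MOVW relocation cases above reduce to masking a 16-bit value into bits [20:5] of the instruction word (mask BIT(16)-1, shift 5, as in the removed encode_insn_immediate() switch). A standalone C sketch of that arithmetic, using the "movz x0, #0" encoding as the example input:

#include <stdint.h>
#include <stdio.h>

/* Patch a 16-bit immediate into bits [20:5] of a MOVZ/MOVK encoding. */
static uint32_t encode_imm16(uint32_t insn, uint64_t imm)
{
	const uint32_t mask = (1u << 16) - 1;	/* BIT(16) - 1 */
	const int shift = 5;

	insn &= ~(mask << shift);		/* clear the old immediate field */
	insn |= ((uint32_t)imm & mask) << shift;
	return insn;
}

int main(void)
{
	/* 0xd2800000 is "movz x0, #0"; patch in the immediate 0x1234 */
	printf("0x%08x\n", encode_imm16(0xd2800000u, 0x1234));
	return 0;
}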
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 2de37529a918..baf5afb7e6a0 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -22,6 +22,7 @@
#include <linux/bitmap.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
@@ -363,26 +364,53 @@ validate_group(struct perf_event *event)
}
static void
+armpmu_disable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ disable_percpu_irq(irq);
+}
+
+static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
+ int irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs)
+ return;
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, armpmu);
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0)
+ return;
+
+ if (irq_is_percpu(irq)) {
+ on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &cpu_hw_events);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq > 0)
+ free_irq(irq, armpmu);
+ }
}
}
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- int i, err, irq, irqs;
+ int err, irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device) {
@@ -391,39 +419,59 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
+ if (!irqs) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0) {
+ pr_err("failed to get valid irq for PMU device\n");
+ return -ENODEV;
+ }
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
- continue;
- }
+ if (irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, armpmu->handle_irq,
+ "arm-pmu", &cpu_hw_events);
- err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
- "arm-pmu", armpmu);
if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
+ pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
+ irq);
armpmu_release_hardware(armpmu);
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq <= 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
+ err = request_irq(irq, armpmu->handle_irq,
+ IRQF_NOBALANCING,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
}
return 0;
@@ -784,8 +832,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
/*
* Event filters for PMUv3
@@ -1044,7 +1092,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1175,7 +1223,8 @@ static void armv8pmu_reset(void *info)
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map, 0xFF);
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
}
static struct arm_pmu armv8pmu = {
@@ -1257,7 +1306,7 @@ device_initcall(register_pmu_driver);
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
- return &__get_cpu_var(cpu_hw_events);
+ return this_cpu_ptr(&cpu_hw_events);
}
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
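One of the perf_event.c changes above is easy to sanity-check in isolation: the EVTYPE masks now expose the full 10-bit PMUv3 event field instead of truncating it to 8 bits. A throwaway userspace check (the event number 0x111 is chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

#define ARMV8_EVTYPE_EVENT_OLD	0xffu	/* old 8-bit event mask */
#define ARMV8_EVTYPE_EVENT_NEW	0x3ffu	/* new 10-bit event mask */

int main(void)
{
	uint32_t ev = 0x111;	/* illustrative event number above 0xff */

	printf("old: 0x%x  new: 0x%x\n",
	       ev & ARMV8_EVTYPE_EVENT_OLD, ev & ARMV8_EVTYPE_EVENT_NEW);
	return 0;
}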
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fbb265350572..3e4d03cb086f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -314,6 +314,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -321,9 +322,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index c484d5625ffb..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -634,28 +634,27 @@ static int compat_gpr_get(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
switch (idx) {
case 15:
- reg = (void *)&task_pt_regs(target)->pc;
+ reg = task_pt_regs(target)->pc;
break;
case 16:
- reg = (void *)&task_pt_regs(target)->pstate;
+ reg = task_pt_regs(target)->pstate;
break;
case 17:
- reg = (void *)&task_pt_regs(target)->orig_x0;
+ reg = task_pt_regs(target)->orig_x0;
break;
default:
- reg = (void *)&task_pt_regs(target)->regs[idx];
+ reg = task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
-
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
if (ret)
break;
- else
- ubuf += sizeof(compat_ulong_t);
+
+ ubuf += sizeof(reg);
}
return ret;
@@ -683,28 +682,28 @@ static int compat_gpr_set(struct task_struct *target,
for (i = 0; i < num_regs; ++i) {
unsigned int idx = start + i;
- void *reg;
+ compat_ulong_t reg;
+
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret)
+ return ret;
+
+ ubuf += sizeof(reg);
switch (idx) {
case 15:
- reg = (void *)&newregs.pc;
+ newregs.pc = reg;
break;
case 16:
- reg = (void *)&newregs.pstate;
+ newregs.pstate = reg;
break;
case 17:
- reg = (void *)&newregs.orig_x0;
+ newregs.orig_x0 = reg;
break;
default:
- reg = (void *)&newregs.regs[idx];
+ newregs.regs[idx] = reg;
}
- ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
-
- if (ret)
- goto out;
- else
- ubuf += sizeof(compat_ulong_t);
}
if (valid_user_regs(&newregs.user_regs))
@@ -712,7 +711,6 @@ static int compat_gpr_set(struct task_struct *target,
else
ret = -EINVAL;
-out:
return ret;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d7512213528a..f117b255bf8c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -61,9 +61,19 @@
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+#endif
+
static const char *cpu_name;
static const char *machine_name;
@@ -180,17 +190,17 @@ static void __init smp_build_mpidr_hash(void)
__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
u64 reg_value;
+ u64 features, block;
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc.S
- */
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
pr_info("CPU configuration botched (ID %08x), unable to continue.\n",
@@ -203,11 +213,13 @@ static void __init setup_processor(void)
pr_info("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
- sprintf(init_utsname()->machine, "aarch64");
+ sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
/* Read the number of ASID bits */
- reg_value = read_cpuid(ID_AA64MMFR0_EL1) & 0xf0;
+ reg_value = read_cpuid(ID_AA64MMFR0_EL1);
+	features = reg_value;
+	reg_value &= 0xf0;
if (reg_value == 0x00)
max_asid_bits = 8;
else if (reg_value == 0x20)
@@ -215,6 +227,35 @@ static void __init setup_processor(void)
else
BUG_ON(1);
cpu_last_asid = 1 << max_asid_bits;
+ /*
+ * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
+ * The blocks we test below represent incremental functionality
+ * for non-negative values. Negative values are reserved.
+ */
+ block = (features >> 4) & 0xf;
+ if (!(block & 0x8)) {
+ switch (block) {
+ default:
+ case 2:
+ elf_hwcap |= HWCAP_PMULL;
+ case 1:
+ elf_hwcap |= HWCAP_AES;
+ case 0:
+ break;
+ }
+ }
+
+ block = (features >> 8) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA1;
+
+ block = (features >> 12) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_SHA2;
+
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
}
static struct machine_desc * __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -297,29 +338,6 @@ static struct machine_desc * __init setup_machine_fdt(phys_addr_t dt_phys)
return mdesc_best;
}
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- base &= PAGE_MASK;
- size &= PAGE_MASK;
- if (base + size < PHYS_OFFSET) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
- return;
- }
- if (base < PHYS_OFFSET) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- base, PHYS_OFFSET);
- size -= PHYS_OFFSET - base;
- base = PHYS_OFFSET;
- }
- memblock_add(base, size);
-}
-
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
/*
* Limit the memory size that was specified via FDT.
*/
@@ -452,6 +470,12 @@ subsys_initcall(topology_init);
static const char *hwcap_str[] = {
"fp",
"asimd",
+ "evtstrm",
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
NULL
};
@@ -479,9 +503,6 @@ static int c_show(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
- loops_per_jiffy / (500000UL/HZ),
- loops_per_jiffy / (5000UL/HZ) % 100);
}
/* dump out the processor features */
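The hwcap probing added to setup_processor() keys off 4-bit ID register blocks in which bit 3 set marks a reserved (negative) value. A standalone sketch of that test, simplified to the non-zero check used for the SHA1/SHA2/CRC32 blocks (the register value below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* A block is usable when it is non-zero and non-negative (bit 3 clear). */
static int block_implemented(uint64_t features, int shift)
{
	uint64_t block = (features >> shift) & 0xf;

	return block && !(block & 0x8);
}

int main(void)
{
	uint64_t id_aa64isar0 = 0x00011120ull;	/* invented example value */

	printf("AES  : %d\n", block_implemented(id_aa64isar0, 4));
	printf("SHA1 : %d\n", block_implemented(id_aa64isar0, 8));
	printf("SHA2 : %d\n", block_implemented(id_aa64isar0, 12));
	printf("CRC32: %d\n", block_implemented(id_aa64isar0, 16));
	return 0;
}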
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e393174fe859..b3fc9f5ec6d3 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -100,34 +100,6 @@ struct compat_rt_sigframe {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-/*
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn)
-#define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn)
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn)
-#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn)
-
-/*
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \
- 0x2700 | __NR_compat_sigreturn)
-#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
- 0x2700 | __NR_compat_rt_sigreturn)
-
-const compat_ulong_t aarch32_sigret_code[6] = {
- /*
- * AArch32 sigreturn code.
- * We don't construct an OABI SWI - instead we just set the imm24 field
- * to the EABI syscall number so that we create a sane disassembly.
- */
- MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
-};
-
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
@@ -150,7 +122,7 @@ static inline int get_sigset_t(sigset_t *set,
return 0;
}
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -474,12 +446,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
- if (thumb) {
+ if (thumb)
spsr |= COMPAT_PSR_T_BIT;
- spsr &= ~COMPAT_PSR_IT_MASK;
- } else {
+ else
spsr &= ~COMPAT_PSR_T_BIT;
- }
+
+ /* The IT state must be cleared for both ARM and Thumb-2 */
+ spsr &= ~COMPAT_PSR_IT_MASK;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
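The macros deleted from signal32.c and the .byte sequences added to kuser32.S encode the same sigreturn trampolines. A quick standalone check of that arithmetic (173 is the usual ARM EABI rt_sigreturn number, used here purely as an example):

#include <stdio.h>

/* ARM:   mov r7, #nr  = 0xe3a07000 | nr,  svc #nr = 0xef000000 | nr
 * Thumb: movs r7, #nr = 0x2700 | nr,      svc #nr = 0xdf00 | nr     */
int main(void)
{
	unsigned int nr = 173;	/* assumed value of __NR_compat_rt_sigreturn */

	printf("arm  : %08x %08x\n", 0xe3a07000u | nr, 0xef000000u | nr);
	printf("thumb: %04x %04x\n", 0x2700u | nr, 0xdf00u | nr);
	return 0;
}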
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index df8b01933134..fed9d17a8866 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -62,6 +62,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
IPI_WAKEUP,
+ IPI_TIMER,
};
static DEFINE_RAW_SPINLOCK(boot_lock);
@@ -367,11 +368,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
- unsigned long bogosum = loops_per_jiffy * num_online_cpus();
-
- pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- num_online_cpus(), bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
}
void __init smp_prepare_boot_cpu(void)
@@ -601,6 +598,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_WAKEUP, "CPU wakeup interrupts"),
+ S(IPI_TIMER, "Timer broadcast interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -610,7 +608,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
@@ -688,6 +686,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
case IPI_WAKEUP:
break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ case IPI_TIMER:
+ irq_enter();
+ tick_receive_broadcast();
+ irq_exit();
+ break;
+#endif
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
@@ -701,6 +706,13 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_TIMER);
+}
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa682f76..c0bc0977cb14 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -48,7 +48,16 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
+
+ /*
+ * We write the release address as LE regardless of the native
+	 * endianness of the kernel. Therefore, any boot-loaders that
+	 * read this address need to convert it to the
+	 * boot-loader's endianness before jumping. This is mandated by
+ * the boot protocol.
+ */
+ release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
+
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
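The comment above requires the release address to be stored little-endian regardless of kernel endianness; outside the kernel, the effect of cpu_to_le64() plus the store can be mimicked with explicit byte writes (the address value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit value as little-endian bytes on any host. */
static void store_le64(uint8_t *dst, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
	uint8_t release_addr[8];
	int i;

	store_le64(release_addr, 0x80001000ull);	/* arbitrary example */
	for (i = 0; i < 8; i++)
		printf("%02x ", release_addr[i]);
	printf("\n");
	return 0;
}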
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 048334bb2651..38f0558f0c0a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
frame->sp = fp + 0x10;
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 491acebf64a6..7f1235099361 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -32,6 +32,22 @@ void __cpu_suspend_save(struct cpu_suspend_ctx *ptr, u64 *save_ptr)
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
}
+/*
+ * This hook is provided so that cpu_suspend code can restore HW
+ * breakpoints as early as possible in the resume path, before reenabling
+ * debug exceptions. Code cannot be run from a CPU PM notifier since by the
+ * time the notifier runs debug exceptions might have been enabled already,
+ * with HW breakpoints registers content still in an unknown state.
+ */
+void (*hw_breakpoint_restore)(void *);
+void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+ /* Prevent multiple restore hook initializations */
+ if (WARN_ON(hw_breakpoint_restore))
+ return;
+ hw_breakpoint_restore = hw_bp_restore;
+}
+
/**
* cpu_suspend
*
@@ -53,6 +69,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(NULL);
}
return ret;
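A hedged usage sketch for the new hook (kernel context; the callback body and names below are hypothetical, while the real registration is expected to live in hw_breakpoint.c):

/* Hypothetical user of cpu_suspend_set_dbg_restorer(). */
static void example_hw_bp_reset(void *unused)
{
	/* reprogram the DBGBVR/DBGBCR/DBGWVR/DBGWCR registers here */
}

static int __init example_debug_init(void)
{
	cpu_suspend_set_dbg_restorer(example_hw_bp_reset);
	return 0;
}
core_initcall(example_debug_init);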
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index a1b19ed7467c..423a5b3fc2be 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* extension.
*/
compat_sys_pread64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
compat_sys_pwrite64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
compat_sys_truncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
compat_sys_ftruncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
compat_sys_readahead_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
compat_sys_fadvise64_64_wrapper:
mov w6, w1
- orr x1, x2, x3, lsl #32
- orr x2, x4, x5, lsl #32
+ regs_to_64 x1, x2, x3
+ regs_to_64 x2, x4, x5
mov w3, w6
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
compat_sys_sync_file_range2_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
compat_sys_fallocate_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
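regs_to_64 (added to asm/assembler.h as part of this series) replaces the open-coded "orr xN, xLO, xHI, lsl #32", presumably so the compat register pairing also holds on big-endian kernels. In plain C the little-endian case amounts to the sketch below (standalone; the values are invented):

#include <stdint.h>
#include <stdio.h>

/* Combine the two 32-bit halves of a compat 64-bit syscall argument. */
static uint64_t pack_compat_u64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)
	       pack_compat_u64(0x89abcdefu, 0x01234567u));
	return 0;
}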
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 0ea7a22bcdf2..a7149cae1615 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -58,7 +58,10 @@ static struct page *vectors_page[1];
static int alloc_vectors_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage;
vpage = get_zeroed_page(GFP_ATOMIC);
@@ -72,7 +75,7 @@ static int alloc_vectors_page(void)
/* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- aarch32_sigret_code, sizeof(aarch32_sigret_code));
+ __aarch32_sigret_code_start, sigret_sz);
flush_icache_range(vpage, vpage + PAGE_SIZE);
vectors_page[0] = virt_to_page(vpage);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9951823df544..f56e7f5e96f9 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -57,6 +57,7 @@ SECTIONS
RO_DATA(PAGE_SIZE)
+ NOTES
_etext = .; /* End of text and rodata section */
. = ALIGN(PAGE_SIZE);
@@ -87,45 +88,15 @@ SECTIONS
PERCPU_SECTION(64)
__init_end = .;
- . = ALIGN(THREAD_SIZE);
- __data_loc = .;
-
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(64)
- READ_MOSTLY_DATA(64)
-
- /*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(32);
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
- _edata_loc = __data_loc + SIZEOF(.data);
- NOTES
+ . = ALIGN(PAGE_SIZE);
+ _data = .;
+ _sdata = .;
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
BSS_SECTION(0, 0, 0)
_end = .;
STABS_DEBUG
- .comment 0 : { *(.comment) }
}
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 8032230f6b5a..fb224a837127 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,5 +1,4 @@
-lib-y := bitops.o delay.o \
- strncpy_from_user.o strnlen_user.o clear_user.o \
+lib-y := bitops.o delay.o clear_user.o \
copy_from_user.o copy_to_user.o copy_in_user.o \
copy_page.o clear_page.o \
memchr.o memmove.o memset.o \
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldaxr x2, [x1]
+1: ldxr x2, [x1]
lsr x0, x2, x3 // Save old value of bit
\instr x2, x2, x4 // toggle bit
stlxr w5, x2, [x1]
cbnz w5, 1b
+ dmb ish
and x0, x0, #1
3: ret
ENDPROC(\name )
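The bitops change above keeps the exclusive load/store loop but moves the ordering to an explicit dmb ish after the store. As a rough userspace analogue (not the kernel implementation), a fully ordered test-and-set bit can be written with compiler atomics:

#include <stdint.h>
#include <stdio.h>

/* Atomically set a bit and return its previous value, fully ordered. */
static int test_and_set_bit64(uint64_t *word, unsigned int bit)
{
	uint64_t mask = 1ull << bit;
	uint64_t old = __atomic_fetch_or(word, mask, __ATOMIC_SEQ_CST);

	return (old & mask) != 0;
}

int main(void)
{
	uint64_t w = 0;
	int first = test_and_set_bit64(&w, 3);
	int second = test_and_set_bit64(&w, 3);

	printf("%d %d\n", first, second);	/* expect "0 1" */
	return 0;
}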
diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S
deleted file mode 100644
index 56e448a831a0..000000000000
--- a/arch/arm64/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Based on arch/arm/lib/strncpy_from_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/*
- * Copy a string from user space to kernel space.
- * x0 = dst, x1 = src, x2 = byte length
- * returns the number of characters copied (strlen of copied string),
- * -EFAULT on exception, or "len" if we fill the whole buffer
- */
-ENTRY(__strncpy_from_user)
- mov x4, x1
-1: subs x2, x2, #1
- bmi 2f
-USER(9f, ldrb w3, [x1], #1 )
- strb w3, [x0], #1
- cbnz w3, 1b
- sub x1, x1, #1 // take NUL character out of count
-2: sub x0, x1, x4
- ret
-ENDPROC(__strncpy_from_user)
-
- .section .fixup,"ax"
- .align 0
-9: strb wzr, [x0] // null terminate
- mov x0, #-EFAULT
- ret
- .previous
diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S
deleted file mode 100644
index 7f7b176a5646..000000000000
--- a/arch/arm64/lib/strnlen_user.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Based on arch/arm/lib/strnlen_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/* Prototype: unsigned long __strnlen_user(const char *str, long n)
- * Purpose : get length of a string in user memory
- * Params : str - address of string in user memory
- * Returns : length of string *including terminator*
- * or zero on exception, or n if too long
- */
-ENTRY(__strnlen_user)
- mov x2, x0
-1: subs x1, x1, #1
- b.mi 2f
-USER(9f, ldrb w3, [x0], #1 )
- cbnz w3, 1b
-2: sub x0, x0, x2
- ret
-ENDPROC(__strnlen_user)
-
- .section .fixup,"ax"
- .align 0
-9: mov x0, #0
- ret
- .previous
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index e729eab9e795..ba86c3f945ad 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -169,7 +169,7 @@ ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
- * __flush_kern_dcache_page(kaddr)
+ * __flush_dcache_area(kaddr, size)
*
* Ensure that the data held in the page kaddr is written back to the
* page in question.
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 868b12103fdd..6025736ead94 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -60,16 +60,39 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return NULL;
+ }
+
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA32;
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+
+ size = PAGE_ALIGN(size);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return page_address(page);
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
}
static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return;
+ }
+
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 8083e637781e..abee8357eb46 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -156,7 +156,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
force_sig_info(sig, &si, tsk);
}
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5ce02daaab39..7a49658ba918 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -32,7 +32,6 @@
#include <linux/of_fdt.h>
#include <linux/dma-contiguous.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -52,13 +51,7 @@ phys_addr_t memstart_addr __read_mostly = 0;
phys_addr_t memstart_addr __read_mostly = 0x80000000;
#endif
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- phys_initrd_start = start;
- phys_initrd_size = end - start;
-}
-
+#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
unsigned long start, size;
@@ -68,12 +61,13 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- phys_initrd_start = start;
- phys_initrd_size = size;
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(start + size);
}
return 0;
}
early_param("initrd", early_initrd);
+#endif
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
@@ -146,13 +140,8 @@ void __init arm64_memblock_init(void)
/* Register the kernel text, kernel data and initrd with memblock */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (phys_initrd_size) {
- memblock_reserve(phys_initrd_start, phys_initrd_size);
-
- /* Now convert initrd to virtual addresses */
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- }
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
/*
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index df749c958aa1..f777f55fe267 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -77,9 +77,14 @@ EXPORT_SYMBOL(__ioremap);
void __iounmap(volatile void __iomem *io_addr)
{
- void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
- vunmap(addr);
+ /*
+ * We could get an address outside vmalloc range in case
+ * of ioremap_cache() reusing a RAM mapping.
+ */
+ if (VMALLOC_START <= addr && addr < VMALLOC_END)
+ vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);
@@ -95,3 +100,13 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+ /* For normal memory we already have a cacheable mapping. */
+ if (pfn_valid(__phys_to_pfn(phys_addr)))
+ return (void __iomem *)__phys_to_virt(phys_addr);
+
+ return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b5dc5f9c77f6..7f04dc4f0d8d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -349,6 +349,19 @@ static void __init map_mem(void)
{
struct memblock_region *reg;
struct map_desc md;
+ phys_addr_t limit;
+
+ /*
+ * Temporarily limit the memblock range. We need to do this as
+ * create_mapping requires puds, pmds and ptes to be allocated from
+ * memory addressable from the initial direct kernel mapping.
+ *
+ * The initial direct kernel mapping, located at swapper_pg_dir,
+ * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+ * aligned to 2MB as per Documentation/arm64/booting.txt).
+ */
+ limit = PHYS_OFFSET + PGDIR_SIZE;
+ memblock_set_current_limit(limit);
/* map all the memory banks */
for_each_memblock(memory, reg) {
@@ -363,6 +376,22 @@ static void __init map_mem(void)
if (start >= end)
break;
+#ifndef CONFIG_ARM64_64K_PAGES
+ /*
+ * For the first memory bank align the start address and
+ * current memblock limit to prevent create_mapping() from
+ * allocating pte page tables from unmapped memory.
+ * When 64K pages are enabled, the pte page table for the
+ * first PGDIR_SIZE is already present in swapper_pg_dir.
+ */
+ if (start < limit)
+ start = ALIGN(start, PMD_SIZE);
+ if (end < limit) {
+ limit = end & PMD_MASK;
+ memblock_set_current_limit(limit);
+ }
+#endif
+
create_mapping(&md);
}
}
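The map_mem() comments above rely on 2 MiB (PMD_SIZE with 4K pages) alignment arithmetic; a quick standalone check of the rounding (the bank start address is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	(2ull * 1024 * 1024)		/* 2 MiB with 4K pages */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* kernel-style ALIGN() */

int main(void)
{
	uint64_t start = 0x80080000ull;	/* arbitrary memblock bank start */

	printf("ALIGN(0x%llx, PMD_SIZE) = 0x%llx\n",
	       (unsigned long long)start,
	       (unsigned long long)ALIGN_UP(start, PMD_SIZE));
	return 0;
}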
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b822010b..005d29e2977d 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
*/
.macro dcache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
- lsr \tmp, \tmp, #16
- and \tmp, \tmp, #0xf // cache line size encoding
+ ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8d1d562141e2..0214dc743cb1 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -175,12 +175,12 @@ ENTRY(__cpu_setup)
bl __flush_dcache_all
mov lr, x28
ic iallu // I+BTB cache invalidate
+ tlbi vmalle1is // invalidate I + D TLBs
dsb sy
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
msr mdscr_el1, xzr // Reset mdscr_el1
- tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
*
@@ -240,10 +240,10 @@ crval:
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
+ * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x030802e2 // clear
+ .word 0x000802e2 // clear
.word 0x0405d11d // set
#endif
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
index 8ae80a18e8ec..19da91e0cd27 100644
--- a/arch/arm64/mm/tlb.S
+++ b/arch/arm64/mm/tlb.S
@@ -35,7 +35,7 @@
*/
ENTRY(__cpu_flush_user_tlb_range)
vma_vm_mm x3, x2 // get vma->vm_mm
- mmid x3, x3 // get vm_mm->context.id
+ mmid w3, x3 // get vm_mm->context.id
dsb sy
lsr x0, x0, #12 // align address
lsr x1, x1, #12
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index d22af851f3f6..17eb8c33c687 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -3,3 +3,6 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += param.h
+generic-y += preempt.h
+generic-y += xor.h
+generic-y += hash.h
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 1bc285123d42..36a0255bf858 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -35,7 +35,6 @@ config BLACKFIN
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
- select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 127826f8a375..359d36fdc247 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -44,3 +44,5 @@ generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 4258b088aa93..d73bb85ccdd3 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -55,3 +55,6 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
+generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
index bdb56f09d0ac..cc89158a0892 100644
--- a/arch/c6x/kernel/devicetree.c
+++ b/arch/c6x/kernel/devicetree.c
@@ -10,44 +10,7 @@
*
*/
#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/initrd.h>
-#include <linux/memblock.h>
-
-void __init early_init_devtree(void *params)
-{
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
- /* Retrieve various informations from the /chosen node of the
- * device-tree, including the platform type, initrd location and
- * size and more ...
- */
- of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line);
-
- /* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
c6x_add_memory(base, size);
}
-
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index f1e79edc9dd2..70d23257546f 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -7,3 +7,7 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h
+generic-y += vga.h
+generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index c5d767028306..bc42f14c9c2e 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -2,3 +2,5 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 995eb47e01bb..1eaa0aaa707e 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -4,3 +4,5 @@ generic-y += exec.h
generic-y += mmu.h
generic-y += module.h
generic-y += trace_clock.h
+generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055..852b48742e8c 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,7 +4,6 @@ comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
select HAVE_OPROFILE
- select USE_GENERIC_SMP_HELPERS if SMP
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
# select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 1da17caac23c..469d223950ff 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -53,3 +53,5 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1a2b7749b047..26ac47433555 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -344,7 +344,6 @@ config FORCE_MAX_ZONEORDER
config SMP
bool "Symmetric multi-processing support"
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 05b03ecd7933..283a83154b5e 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -3,3 +3,6 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += kvm_para.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += vtime.h
+generic-y += hash.h
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 60576e06b6fb..d0a69aa35e27 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -45,14 +45,37 @@
# define smp_rmb() rmb()
# define smp_wmb() wmb()
# define smp_read_barrier_depends() read_barrier_depends()
+
#else
+
# define smp_mb() barrier()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
# define smp_read_barrier_depends() do { } while(0)
+
#endif
/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq,
+ * so there is no need for asm trickery!
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+/*
* XXX check on this ---I suspect what Linus really wants here is
* acquire vs release semantics but we can't discuss this stuff with
* Linus just yet. Grrr...
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 3637e03d2282..33cab9a8adff 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -105,7 +105,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
}
int
-copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from)
+copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from)
{
if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
return -EFAULT;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index bcd17b206571..ff67f9a1ac82 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -275,7 +275,6 @@ source "kernel/Kconfig.preempt"
config SMP
bool "Symmetric multi-processing support"
- select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index bebdc36ebb0a..932435ac4e5c 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -3,3 +3,5 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 09d77a862da3..7cc8c364924d 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -31,3 +31,5 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index dcd94406030e..1647703a7bad 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -111,7 +111,6 @@ config METAG_META21
config SMP
bool "Symmetric multi-processing support"
depends on METAG_META21 && METAG_META21_MMU
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one thread running
Linux. If you have a system with only one thread running Linux,
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 6ae0ccb632cb..b716d807c2ec 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -52,3 +52,5 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index c90bfc6bf648..5d6b4b407dda 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -82,4 +82,19 @@ static inline void fence(void)
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index d05b8455c44c..7356c74ab96a 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -12,7 +12,6 @@
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
-#include <linux/of_fdt.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -417,12 +416,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
-
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- pr_err("%s(%lx, %lx)\n",
- __func__, start, end);
-}
-#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index d3c51a6a601d..43eec338ff50 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -3,3 +3,5 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += syscalls.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 0a2c68f9f9b0..e20eb41f983a 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -41,16 +41,6 @@
#include <asm/sections.h>
#include <asm/pci-bridge.h>
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- memblock_add(base, size);
-}
-
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
#ifdef CONFIG_EARLY_PRINTK
static char *stdout;
@@ -135,16 +125,6 @@ void __init early_init_devtree(void *params)
pr_debug(" <- early_init_devtree()\n");
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
/*******
*
* New implementation of the OF "find" APIs, return a refcounted
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e53e2b40d695..f03f340c98ae 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2177,7 +2177,6 @@ source "mm/Kconfig"
config SMP
bool "Multi-Processing support"
depends on SYS_SUPPORTS_SMP
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 9b54b7a403d4..6930574a2a7d 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,2 +1,6 @@
# MIPS headers
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += ucontext.h
+generic-y += xor.h
+generic-y += hash.h
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 314ab5532019..52c5b61d7aba 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -180,4 +180,19 @@
#define nudge_writes() mb()
#endif
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 5712bb532245..233bc2ea6002 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bootmem.h>
-#include <linux/initrd.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -57,16 +56,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
int __init early_init_dt_scan_model(unsigned long node, const char *uname,
int depth, void *data)
{
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 57de8b751627..1905a419aa46 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -314,7 +314,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *,
return ret;
}
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 428da175d073..75d5b7781fb4 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -181,7 +181,6 @@ endmenu
config SMP
bool "Symmetric multi-processing support"
default y
- select USE_GENERIC_SMP_HELPERS
depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
---help---
This enables support for systems with more than one CPU. If you have
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index c5d767028306..bc42f14c9c2e 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -2,3 +2,5 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index f20d01d9aaf9..4507a51c35ed 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -66,3 +66,6 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
generic-y += word-at-a-time.h
+generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 5869e3fa5dd3..1ecc7a90a10f 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -55,11 +55,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
void __init early_init_devtree(void *params)
{
void *alloc;
@@ -94,13 +89,3 @@ void __init early_init_devtree(void *params)
initial_boot_params = alloc;
}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 6507dabdd5dd..e1773e5d9964 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -226,7 +226,6 @@ endchoice
config SMP
bool "Symmetric multi-processing support"
- select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index ff4c9faed546..75edd5fcc6ff 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -4,3 +4,5 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
poll.h xor.h clkdev.h exec.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 2a625fb063e1..50dfafc3f2c1 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -219,7 +219,7 @@ void *module_alloc(unsigned long size)
* init_data correctly */
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL | __GFP_HIGHMEM,
- PAGE_KERNEL_RWX, -1,
+ PAGE_KERNEL_RWX, NUMA_NO_NODE,
__builtin_return_address(0));
}
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 33eca1b04926..bd2d467655a8 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -320,7 +320,7 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
}
int
-copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
+copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
{
compat_uptr_t addr;
compat_int_t val;
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index 72ab41a51f32..af51d4ccee42 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -34,7 +34,7 @@ struct compat_ucontext {
/* ELF32 signal handling */
-int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from);
+int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
/* In a deft move of uber-hackery, we decide to carry the top half of all
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fe404e77246e..22d9ea0641b9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -106,7 +106,6 @@ config PPC
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_OPROFILE
select HAVE_DEBUG_KMEMLEAK
select GENERIC_ATOMIC64 if PPC32
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 650757c300db..6c0a955a1b06 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,3 +2,6 @@
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += vtime.h
+generic-y += hash.h
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index ae782254e731..f89da808ce31 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -45,11 +45,15 @@
# define SMPWMB eieio
#endif
+#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
#define smp_mb() mb()
-#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_rmb() __lwsync()
#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
+#define __lwsync() barrier()
+
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
@@ -65,4 +69,19 @@
#define data_barrier(x) \
asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ___p1; \
+})
+
#endif /* _ASM_POWERPC_BARRIER_H */
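The smp_store_release()/smp_load_acquire() pair added above gives one-way ordering: the release orders all earlier stores before the flag write (lwsync on SMP PowerPC, a plain compiler barrier otherwise), and the acquire orders the flag read before later accesses. A minimal usage sketch, not part of the patch; the "data"/"ready" variables are illustrative only:

/* Producer publishes data, then the flag, with release semantics. */
static int data;
static int ready;

static void producer(void)
{
	data = 42;                      /* payload store */
	smp_store_release(&ready, 1);   /* ordered after the data store */
}

/* Consumer pairs an acquire load with the producer's release store. */
static int consumer(void)
{
	if (smp_load_acquire(&ready))   /* ordered before the data load */
		return data;            /* sees 42 whenever ready == 1 */
	return 0;                       /* not published yet */
}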
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a99cce2..11ca23b67795 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -544,20 +544,34 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+static void __init early_reserve_mem_dt(void)
{
- return __va(memblock_alloc(size, align));
-}
+ unsigned long i, len, dt_root;
+ const __be32 *prop;
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
+ dt_root = of_get_flat_dt_root();
+
+ prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
+
+ if (!prop)
+ return;
+
+ DBG("Found new-style reserved-ranges\n");
+
+ /* Each reserved range is an (address,size) pair, 2 cells each,
+ * totalling 4 cells per range. */
+ for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
+ u64 base, size;
+
+ base = of_read_number(prop + (i * 4) + 0, 2);
+ size = of_read_number(prop + (i * 4) + 2, 2);
+
+ if (size) {
+ DBG("reserving: %llx -> %llx\n", base, size);
+ memblock_reserve(base, size);
+ }
+ }
}
-#endif
static void __init early_reserve_mem(void)
{
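The reserved-ranges walk above treats the property as a flat array of big-endian 32-bit cells: two cells for the address, two for the size, four per range. A standalone userspace sketch of the same decoding (the property values are made up; read_2cells() mirrors what of_read_number(prop, 2) does for two cells):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl(), htonl() */

/* Fold two big-endian 32-bit cells into a 64-bit value, high cell first. */
static uint64_t read_2cells(const uint32_t *cells)
{
	return ((uint64_t)ntohl(cells[0]) << 32) | ntohl(cells[1]);
}

int main(void)
{
	/* e.g. reserved-ranges = <0x0 0x8f000000  0x0 0x01000000>; */
	uint32_t prop[] = { htonl(0x0), htonl(0x8f000000),
			    htonl(0x0), htonl(0x01000000) };
	uint64_t base = read_2cells(&prop[0]);
	uint64_t size = read_2cells(&prop[2]);

	printf("reserving: %llx -> %llx\n",
	       (unsigned long long)base, (unsigned long long)size);
	return 0;
}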
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 7e9dff80e1dc..2ba0d133bca7 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -888,7 +888,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
#endif
#ifdef CONFIG_PPC64
-int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
+int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
{
int err;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d9196c9f93d9..817db9bd392c 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -280,6 +280,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 97dcbea97a1c..5280cea61641 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -140,7 +140,6 @@ config S390
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select SYSCTL_EXCEPTION_TRACE
- select USE_GENERIC_SMP_HELPERS if SMP
select VIRT_CPU_ACCOUNTING
config SCHED_OMIT_FRAME_POINTER
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index f313f9cbcf44..8386a4a1f19a 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -2,3 +2,5 @@
generic-y += clkdev.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 16760eeb79b0..578680f6207a 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -32,4 +32,19 @@
#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index c439ac9ced09..1dbed3cdfbdd 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -49,7 +49,7 @@ typedef struct
__u32 gprs_high[NUM_GPRS];
} rt_sigframe32;
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 7845e15a17df..b89b59158b95 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -50,7 +50,7 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, -1,
+ GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index cebaff8069a1..09405b2dbd65 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -3,3 +3,6 @@ header-y +=
generic-y += clkdev.h
generic-y += trace_clock.h
+generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8c868cf2cf93..aad7e6a53a19 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -712,7 +712,6 @@ config CC_STACKPROTECTOR
config SMP
bool "Symmetric multi-processing support"
depends on SYS_SUPPORTS_SMP
- select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 280bea9e5e2b..0cd7198a4524 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -34,3 +34,5 @@ generic-y += termios.h
generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9ac9f1666339..1dc5857dd895 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -29,7 +29,6 @@ config SPARC
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
- select USE_GENERIC_SMP_HELPERS if SMP
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
select HAVE_BPF_JIT
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 7e4a97fbded4..4b60a0c325ec 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -16,3 +16,5 @@ generic-y += serial.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 95d45986f908..b5aad964558e 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define smp_read_barrier_depends() do { } while(0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 4435488ebe25..97655e0fd243 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -29,7 +29,7 @@ static void *module_map(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, -1,
+ GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
}
#else
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index b524f91dd0e5..ee789d2ef05d 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -68,7 +68,7 @@ struct rt_signal_frame32 {
/* __siginfo_rwin_t * */u32 rwin_save;
} __attribute__((aligned(8)));
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 3aa37669ff8c..2e263ea9cf73 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -8,7 +8,6 @@ config TILE
select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
select SYSCTL_EXCEPTION_TRACE
- select USE_GENERIC_SMP_HELPERS
select CC_OPTIMIZE_FOR_SIZE
select HAVE_DEBUG_KMEMLEAK
select HAVE_GENERIC_HARDIRQS
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index b17b9b8e53cd..ec54a9862c3e 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -37,3 +37,5 @@ generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index d0a052e725be..732e1c1fc4fd 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -48,7 +48,7 @@ struct compat_rt_sigframe {
struct compat_ucontext uc;
};
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from)
{
int err;
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index b30f34a79882..75de4abe4f94 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -3,3 +3,5 @@ generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
generic-y += switch_to.h clkdev.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 89d8b6c4e39a..3ef4f9d9bf5d 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -60,3 +60,5 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe120da25625..170e310d7c9f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -90,7 +90,6 @@ config X86
select GENERIC_IRQ_SHOW
select GENERIC_CLOCKEVENTS_MIN_ADJUST
select IRQ_FORCED_THREADING
- select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_BPF_JIT if X86_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select CLKEVT_I8253
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 10adb41f162e..a6f0d3ddd179 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -36,7 +36,7 @@
#define FIX_EFLAGS __FIX_EFLAGS
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err = 0;
bool ia32 = test_thread_flag(TIF_IA32);
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 7f669853317a..eca20286a91c 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -5,3 +5,4 @@ genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
generic-y += clkdev.h
+generic-y += preempt.h
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index c6cd358a1eec..04a48903b2eb 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -92,12 +92,53 @@
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For either of these options x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif
/*
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index b1581527a236..27ffa7b8beee 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -51,16 +51,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
-}
-#endif
-
void __init add_dtb(u64 data)
{
initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 216a4d754b0c..18be189368bb 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -49,7 +49,7 @@ void *module_alloc(unsigned long size)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
}
#ifdef CONFIG_X86_32
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 1b982641ec35..d7efa1502101 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -28,3 +28,5 @@ generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
+generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 14c6c3a6f04b..0c5dd5635684 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -64,8 +64,8 @@ extern struct rtc_ops no_rtc_ops;
struct rtc_ops *rtc_ops;
#ifdef CONFIG_BLK_DEV_INITRD
-extern void *initrd_start;
-extern void *initrd_end;
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
int initrd_is_mapped = 0;
extern int initrd_below_start_ok;
#endif
@@ -152,8 +152,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
{
meminfo_t* mi;
mi = (meminfo_t*)(tag->data);
- initrd_start = __va(mi->start);
- initrd_end = __va(mi->end);
+ initrd_start = (unsigned long)__va(mi->start);
+ initrd_end = (unsigned long)__va(mi->end);
return 0;
}
@@ -170,14 +170,6 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
-{
- initrd_start = (void *)__va(start);
- initrd_end = (void *)__va(end);
- initrd_below_start_ok = 1;
-}
-
#endif /* CONFIG_OF */
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/block/blk-mq.c b/block/blk-mq.c
new file mode 100644
index 000000000000..c661896e2465
--- /dev/null
+++ b/block/blk-mq.c
@@ -0,0 +1,1500 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/smp.h>
+#include <linux/llist.h>
+#include <linux/list_sort.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+#include <linux/sched/sysctl.h>
+#include <linux/delay.h>
+
+#include <trace/events/block.h>
+
+#include <linux/blk-mq.h>
+#include "blk.h"
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+static DEFINE_MUTEX(all_q_mutex);
+static LIST_HEAD(all_q_list);
+
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+
+DEFINE_PER_CPU(struct llist_head, ipi_lists);
+
+static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ unsigned int cpu)
+{
+ return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+ return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+ put_cpu();
+}
+
+/*
+ * Check if any of the ctx's have pending work in this hardware queue
+ */
+static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < hctx->nr_ctx_map; i++)
+ if (hctx->ctx_map[i])
+ return true;
+
+ return false;
+}
+
+/*
+ * Mark this ctx as having pending work in this hardware queue
+ */
+static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ if (!test_bit(ctx->index_hw, hctx->ctx_map))
+ set_bit(ctx->index_hw, hctx->ctx_map);
+}
+
+static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
+ bool reserved)
+{
+ struct request *rq;
+ unsigned int tag;
+
+ tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
+ if (tag != BLK_MQ_TAG_FAIL) {
+ rq = hctx->rqs[tag];
+ rq->tag = tag;
+
+ return rq;
+ }
+
+ return NULL;
+}
+
+static int blk_mq_queue_enter(struct request_queue *q)
+{
+ int ret;
+
+ __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ smp_wmb();
+ /* we cannot freeze the queue properly while it is still initializing */
+ if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
+ return 0;
+
+ __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+
+ spin_lock_irq(q->queue_lock);
+ ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
+ !blk_queue_bypass(q), *q->queue_lock);
+ /* inc usage count while holding the lock so freeze_queue cannot run here */
+ if (!ret)
+ __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static void blk_mq_queue_exit(struct request_queue *q)
+{
+ __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
+}
+
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+static void blk_mq_freeze_queue(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (!drain)
+ return;
+
+ while (true) {
+ s64 count;
+
+ spin_lock_irq(q->queue_lock);
+ count = percpu_counter_sum(&q->mq_usage_counter);
+ spin_unlock_irq(q->queue_lock);
+
+ if (count == 0)
+ break;
+ blk_mq_run_queues(q, false);
+ msleep(10);
+ }
+}
+
+static void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+ bool wake = false;
+
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth) {
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ wake = true;
+ }
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+ if (wake)
+ wake_up_all(&q->mq_freeze_wq);
+}
+
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
+{
+ return blk_mq_has_free_tags(hctx->tags);
+}
+EXPORT_SYMBOL(blk_mq_can_queue);
+
+static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
+ unsigned int rw_flags)
+{
+ rq->mq_ctx = ctx;
+ rq->cmd_flags = rw_flags;
+ ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+}
+
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
+{
+ return blk_mq_alloc_rq(hctx, gfp, reserved);
+}
+
+static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
+ int rw, gfp_t gfp,
+ bool reserved)
+{
+ struct request *rq;
+
+ do {
+ struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
+ if (rq) {
+ blk_mq_rq_ctx_init(ctx, rq, rw);
+ break;
+ } else if (!(gfp & __GFP_WAIT))
+ break;
+
+ blk_mq_put_ctx(ctx);
+ __blk_mq_run_hw_queue(hctx);
+ blk_mq_wait_for_tags(hctx->tags);
+ } while (1);
+
+ return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+ gfp_t gfp, bool reserved)
+{
+ struct request *rq;
+
+ if (blk_mq_queue_enter(q))
+ return NULL;
+
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ blk_mq_put_ctx(rq->mq_ctx);
+ return rq;
+}
+
+struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
+ gfp_t gfp)
+{
+ struct request *rq;
+
+ if (blk_mq_queue_enter(q))
+ return NULL;
+
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
+ blk_mq_put_ctx(rq->mq_ctx);
+ return rq;
+}
+EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
+
+/*
+ * Re-init and set pdu, if we have it
+ */
+static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+ blk_rq_init(hctx->queue, rq);
+
+ if (hctx->cmd_size)
+ rq->special = blk_mq_rq_to_pdu(rq);
+}
+
+static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx, struct request *rq)
+{
+ const int tag = rq->tag;
+ struct request_queue *q = rq->q;
+
+ blk_mq_rq_init(hctx, rq);
+ blk_mq_put_tag(hctx->tags, tag);
+
+ blk_mq_queue_exit(q);
+}
+
+void blk_mq_free_request(struct request *rq)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q = rq->q;
+
+ ctx->rq_completed[rq_is_sync(rq)]++;
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ __blk_mq_free_request(hctx, ctx, rq);
+}
+
+static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+{
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
+
+ if (unlikely(rq->cmd_flags & REQ_QUIET))
+ set_bit(BIO_QUIET, &bio->bi_flags);
+
+ /* don't actually finish bio if it's part of flush sequence */
+ if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
+ bio_endio(bio, error);
+}
+
+void blk_mq_complete_request(struct request *rq, int error)
+{
+ struct bio *bio = rq->bio;
+ unsigned int bytes = 0;
+
+ trace_block_rq_complete(rq->q, rq);
+
+ while (bio) {
+ struct bio *next = bio->bi_next;
+
+ bio->bi_next = NULL;
+ bytes += bio->bi_size;
+ blk_mq_bio_endio(rq, bio, error);
+ bio = next;
+ }
+
+ blk_account_io_completion(rq, bytes);
+
+ if (rq->end_io)
+ rq->end_io(rq, error);
+ else
+ blk_mq_free_request(rq);
+
+ blk_account_io_done(rq);
+}
+
+void __blk_mq_end_io(struct request *rq, int error)
+{
+ if (!blk_mark_rq_complete(rq))
+ blk_mq_complete_request(rq, error);
+}
+
+#if defined(CONFIG_SMP)
+
+/*
+ * Called with interrupts disabled.
+ */
+static void ipi_end_io(void *data)
+{
+ struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
+ struct llist_node *entry, *next;
+ struct request *rq;
+
+ entry = llist_del_all(list);
+
+ while (entry) {
+ next = entry->next;
+ rq = llist_entry(entry, struct request, ll_list);
+ __blk_mq_end_io(rq, rq->errors);
+ entry = next;
+ }
+}
+
+static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
+ struct request *rq, const int error)
+{
+ struct call_single_data *data = &rq->csd;
+
+ rq->errors = error;
+ rq->ll_list.next = NULL;
+
+ /*
+ * If the list is non-empty, an existing IPI must already
+ * be "in flight". If that is the case, we need not schedule
+ * a new one.
+ */
+ if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
+ data->func = ipi_end_io;
+ data->flags = 0;
+ __smp_call_function_single(ctx->cpu, data, 0);
+ }
+
+ return true;
+}
+#else /* CONFIG_SMP */
+static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
+ struct request *rq, const int error)
+{
+ return false;
+}
+#endif
+
+/*
+ * End IO on this request on a multiqueue enabled driver. We'll either do
+ * it directly inline, or punt to a local IPI handler on the matching
+ * remote CPU.
+ */
+void blk_mq_end_io(struct request *rq, int error)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ int cpu;
+
+ if (!ctx->ipi_redirect)
+ return __blk_mq_end_io(rq, error);
+
+ cpu = get_cpu();
+
+ if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
+ !ipi_remote_cpu(ctx, cpu, rq, error))
+ __blk_mq_end_io(rq, error);
+
+ put_cpu();
+}
+EXPORT_SYMBOL(blk_mq_end_io);
+
+static void blk_mq_start_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ trace_block_rq_issue(q, rq);
+
+ /*
+ * Just mark start time and set the started bit. Due to memory
+ * ordering, we know we'll see the correct deadline as long as
+ * REQ_ATOM_STARTED is seen.
+ */
+ rq->deadline = jiffies + q->rq_timeout;
+ set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+
+static void blk_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ trace_block_rq_requeue(q, rq);
+ clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+
+struct blk_mq_timeout_data {
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long *next;
+ unsigned int *next_set;
+};
+
+static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
+{
+ struct blk_mq_timeout_data *data = __data;
+ struct blk_mq_hw_ctx *hctx = data->hctx;
+ unsigned int tag;
+
+ /* It may not be in flight yet (this is where
+ * the REQ_ATOM_STARTED flag comes in). The requests are
+ * statically allocated, so we know it's always safe to access the
+ * memory associated with a bit offset into ->rqs[].
+ */
+ tag = 0;
+ do {
+ struct request *rq;
+
+ tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag);
+ if (tag >= hctx->queue_depth)
+ break;
+
+ rq = hctx->rqs[tag++];
+
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ continue;
+
+ blk_rq_check_expired(rq, data->next, data->next_set);
+ } while (1);
+}
+
+static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
+ unsigned long *next,
+ unsigned int *next_set)
+{
+ struct blk_mq_timeout_data data = {
+ .hctx = hctx,
+ .next = next,
+ .next_set = next_set,
+ };
+
+ /*
+ * Ask the tagging code to iterate busy requests, so we can
+ * check them for timeout.
+ */
+ blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
+}
+
+static void blk_mq_rq_timer(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *) data;
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long next = 0;
+ int i, next_set = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+
+ if (next_set)
+ mod_timer(&q->timeout, round_jiffies_up(next));
+}
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+ struct blk_mq_ctx *ctx, struct bio *bio)
+{
+ struct request *rq;
+ int checked = 8;
+
+ list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+ int el_ret;
+
+ if (!checked--)
+ break;
+
+ if (!blk_rq_merge_ok(rq, bio))
+ continue;
+
+ el_ret = blk_try_merge(rq, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ if (bio_attempt_back_merge(q, rq, bio)) {
+ ctx->rq_merged++;
+ return true;
+ }
+ break;
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ if (bio_attempt_front_merge(q, rq, bio)) {
+ ctx->rq_merged++;
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void blk_mq_add_timer(struct request *rq)
+{
+ __blk_add_timer(rq, NULL);
+}
+
+/*
+ * Run this hardware queue, pulling any software queues mapped to it in.
+ * Note that this function currently has various problems around ordering
+ * of IO. In particular, we'd like FIFO behaviour on handling existing
+ * items on the hctx->dispatch list. Ignore that for now.
+ */
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_ctx *ctx;
+ struct request *rq;
+ LIST_HEAD(rq_list);
+ int bit, queued;
+
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ return;
+
+ hctx->run++;
+
+ /*
+ * Touch any software queue that has pending entries.
+ */
+ for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) {
+ clear_bit(bit, hctx->ctx_map);
+ ctx = hctx->ctxs[bit];
+ BUG_ON(bit != ctx->index_hw);
+
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(&ctx->rq_list, &rq_list);
+ spin_unlock(&ctx->lock);
+ }
+
+ /*
+ * If we have previous entries on our dispatch list, grab them
+ * and stuff them at the front for more fair dispatch.
+ */
+ if (!list_empty_careful(&hctx->dispatch)) {
+ spin_lock(&hctx->lock);
+ if (!list_empty(&hctx->dispatch))
+ list_splice_init(&hctx->dispatch, &rq_list);
+ spin_unlock(&hctx->lock);
+ }
+
+ /*
+ * Delete and return all entries from our dispatch list
+ */
+ queued = 0;
+
+ /*
+ * Now process all the entries, sending them to the driver.
+ */
+ while (!list_empty(&rq_list)) {
+ int ret;
+
+ rq = list_first_entry(&rq_list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+
+ /*
+ * Last request in the series. Flag it as such, this
+ * enables drivers to know when IO should be kicked off,
+ * if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition drivers
+ * should do kick off. If drive is busy, the last
+ * request might not have the bit set.
+ */
+ if (list_empty(&rq_list))
+ rq->cmd_flags |= REQ_END;
+
+ ret = q->mq_ops->queue_rq(hctx, rq);
+ switch (ret) {
+ case BLK_MQ_RQ_QUEUE_OK:
+ queued++;
+ continue;
+ case BLK_MQ_RQ_QUEUE_BUSY:
+ /*
+ * FIXME: we should have a mechanism to stop the queue
+ * like blk_stop_queue, otherwise we will waste cpu
+ * time
+ */
+ list_add(&rq->queuelist, &rq_list);
+ blk_mq_requeue_request(rq);
+ break;
+ default:
+ pr_err("blk-mq: bad return on queue: %d\n", ret);
+ rq->errors = -EIO;
+ case BLK_MQ_RQ_QUEUE_ERROR:
+ blk_mq_end_io(rq, rq->errors);
+ break;
+ }
+
+ if (ret == BLK_MQ_RQ_QUEUE_BUSY)
+ break;
+ }
+
+ if (!queued)
+ hctx->dispatched[0]++;
+ else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
+ hctx->dispatched[ilog2(queued) + 1]++;
+
+ /*
+ * Any items that need requeuing? Stuff them into hctx->dispatch,
+ * that is where we will continue on next queue run.
+ */
+ if (!list_empty(&rq_list)) {
+ spin_lock(&hctx->lock);
+ list_splice(&rq_list, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+ }
+}
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ return;
+
+ if (!async)
+ __blk_mq_run_hw_queue(hctx);
+ else {
+ struct request_queue *q = hctx->queue;
+
+ kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0);
+ }
+}
+
+void blk_mq_run_queues(struct request_queue *q, bool async)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if ((!blk_mq_hctx_has_pending(hctx) &&
+ list_empty_careful(&hctx->dispatch)) ||
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ continue;
+
+ blk_mq_run_hw_queue(hctx, async);
+ }
+}
+EXPORT_SYMBOL(blk_mq_run_queues);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ cancel_delayed_work(&hctx->delayed_work);
+ set_bit(BLK_MQ_S_STOPPED, &hctx->state);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queue);
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_stop_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_stop_hw_queues);
+
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ __blk_mq_run_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_start_hw_queue);
+
+void blk_mq_start_stopped_hw_queues(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ continue;
+
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ blk_mq_run_hw_queue(hctx, true);
+ }
+}
+EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
+
+static void blk_mq_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work);
+ __blk_mq_run_hw_queue(hctx);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
+ blk_mq_hctx_mark_pending(hctx, ctx);
+
+ /*
+ * We do this early, to ensure we are on the right CPU.
+ */
+ blk_mq_add_timer(rq);
+}
+
+void blk_mq_insert_request(struct request_queue *q, struct request *rq,
+ bool run_queue)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx, *current_ctx;
+
+ ctx = rq->mq_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ blk_insert_flush(rq);
+ } else {
+ current_ctx = blk_mq_get_ctx(q);
+
+ if (!cpu_online(ctx->cpu)) {
+ ctx = current_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ rq->mq_ctx = ctx;
+ }
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq);
+ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+ }
+
+ if (run_queue)
+ __blk_mq_run_hw_queue(hctx);
+}
+EXPORT_SYMBOL(blk_mq_insert_request);
+
+/*
+ * This is a special version of blk_mq_insert_request to bypass FLUSH request
+ * check. Should only be used internally.
+ */
+void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
+{
+ struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx, *current_ctx;
+
+ current_ctx = blk_mq_get_ctx(q);
+
+ ctx = rq->mq_ctx;
+ if (!cpu_online(ctx->cpu)) {
+ ctx = current_ctx;
+ rq->mq_ctx = ctx;
+ }
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /* ctx->cpu might be offline */
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq);
+ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, async);
+}
+
+static void blk_mq_insert_requests(struct request_queue *q,
+ struct blk_mq_ctx *ctx,
+ struct list_head *list,
+ int depth,
+ bool from_schedule)
+
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *current_ctx;
+
+ trace_block_unplug(q, depth, !from_schedule);
+
+ current_ctx = blk_mq_get_ctx(q);
+
+ if (!cpu_online(ctx->cpu))
+ ctx = current_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ /*
+ * preemption doesn't flush plug list, so it's possible ctx->cpu is
+ * offline now
+ */
+ spin_lock(&ctx->lock);
+ while (!list_empty(list)) {
+ struct request *rq;
+
+ rq = list_first_entry(list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ rq->mq_ctx = ctx;
+ __blk_mq_insert_request(hctx, rq);
+ }
+ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+
+ blk_mq_run_hw_queue(hctx, from_schedule);
+}
+
+static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct request *rqa = container_of(a, struct request, queuelist);
+ struct request *rqb = container_of(b, struct request, queuelist);
+
+ return !(rqa->mq_ctx < rqb->mq_ctx ||
+ (rqa->mq_ctx == rqb->mq_ctx &&
+ blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+}
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+ struct blk_mq_ctx *this_ctx;
+ struct request_queue *this_q;
+ struct request *rq;
+ LIST_HEAD(list);
+ LIST_HEAD(ctx_list);
+ unsigned int depth;
+
+ list_splice_init(&plug->mq_list, &list);
+
+ list_sort(NULL, &list, plug_ctx_cmp);
+
+ this_q = NULL;
+ this_ctx = NULL;
+ depth = 0;
+
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+ BUG_ON(!rq->q);
+ if (rq->mq_ctx != this_ctx) {
+ if (this_ctx) {
+ blk_mq_insert_requests(this_q, this_ctx,
+ &ctx_list, depth,
+ from_schedule);
+ }
+
+ this_ctx = rq->mq_ctx;
+ this_q = rq->q;
+ depth = 0;
+ }
+
+ depth++;
+ list_add_tail(&rq->queuelist, &ctx_list);
+ }
+
+ /*
+ * If 'this_ctx' is set, we know we have entries to complete
+ * on 'ctx_list'. Do those.
+ */
+ if (this_ctx) {
+ blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
+ from_schedule);
+ }
+}
+
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+{
+ init_request_from_bio(rq, bio);
+ blk_account_io_start(rq, 1);
+}
+
+static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ const int is_sync = rw_is_sync(bio->bi_rw);
+ const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+ int rw = bio_data_dir(bio);
+ struct request *rq;
+ unsigned int use_plug, request_count = 0;
+
+ /*
+ * If we have multiple hardware queues, just go directly to
+ * one of those for sync IO.
+ */
+ use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);
+
+ blk_queue_bounce(q, &bio);
+
+ if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
+ return;
+
+ if (blk_mq_queue_enter(q)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ ctx = blk_mq_get_ctx(q);
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ trace_block_getrq(q, bio, rw);
+ rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
+ if (likely(rq))
+ blk_mq_rq_ctx_init(ctx, rq, rw);
+ else {
+ blk_mq_put_ctx(ctx);
+ trace_block_sleeprq(q, bio, rw);
+ rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+ false);
+ ctx = rq->mq_ctx;
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ }
+
+ hctx->queued++;
+
+ if (unlikely(is_flush_fua)) {
+ blk_mq_bio_to_request(rq, bio);
+ blk_mq_put_ctx(ctx);
+ blk_insert_flush(rq);
+ goto run_queue;
+ }
+
+ /*
+ * A task plug currently exists. Since this is completely lockless,
+ * utilize that to temporarily store requests until the task is
+ * either done or scheduled away.
+ */
+ if (use_plug) {
+ struct blk_plug *plug = current->plug;
+
+ if (plug) {
+ blk_mq_bio_to_request(rq, bio);
+ if (list_empty(&plug->mq_list))
+ trace_block_plug(q);
+ else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ blk_flush_plug_list(plug, false);
+ trace_block_plug(q);
+ }
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ blk_mq_put_ctx(ctx);
+ return;
+ }
+ }
+
+ spin_lock(&ctx->lock);
+
+ if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+ blk_mq_attempt_merge(q, ctx, bio))
+ __blk_mq_free_request(hctx, ctx, rq);
+ else {
+ blk_mq_bio_to_request(rq, bio);
+ __blk_mq_insert_request(hctx, rq);
+ }
+
+ spin_unlock(&ctx->lock);
+ blk_mq_put_ctx(ctx);
+
+ /*
+ * For a SYNC request, send it to the hardware immediately. For an
+ * ASYNC request, just ensure that we run it later on. The latter
+ * allows for merging opportunities and more efficient dispatching.
+ */
+run_queue:
+ blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
+}
+
+/*
+ * Default mapping to a software queue, since we use one per CPU.
+ */
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+EXPORT_SYMBOL(blk_mq_map_queue);
+
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg,
+ unsigned int hctx_index)
+{
+ return kmalloc_node(sizeof(struct blk_mq_hw_ctx),
+ GFP_KERNEL | __GFP_ZERO, reg->numa_node);
+}
+EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
+
+void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
+ unsigned int hctx_index)
+{
+ kfree(hctx);
+}
+EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
+
+static void blk_mq_hctx_notify(void *data, unsigned long action,
+ unsigned int cpu)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct blk_mq_ctx *ctx;
+ LIST_HEAD(tmp);
+
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return;
+
+ /*
+ * Move ctx entries to new CPU, if this one is going away.
+ */
+ ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+
+ spin_lock(&ctx->lock);
+ if (!list_empty(&ctx->rq_list)) {
+ list_splice_init(&ctx->rq_list, &tmp);
+ clear_bit(ctx->index_hw, hctx->ctx_map);
+ }
+ spin_unlock(&ctx->lock);
+
+ if (list_empty(&tmp))
+ return;
+
+ ctx = blk_mq_get_ctx(hctx->queue);
+ spin_lock(&ctx->lock);
+
+ while (!list_empty(&tmp)) {
+ struct request *rq;
+
+ rq = list_first_entry(&tmp, struct request, queuelist);
+ rq->mq_ctx = ctx;
+ list_move_tail(&rq->queuelist, &ctx->rq_list);
+ }
+
+ blk_mq_hctx_mark_pending(hctx, ctx);
+
+ spin_unlock(&ctx->lock);
+ blk_mq_put_ctx(ctx);
+}
+
+static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+ void (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < hctx->queue_depth; i++) {
+ struct request *rq = hctx->rqs[i];
+
+ init(data, hctx, rq, i);
+ }
+}
+
+void blk_mq_init_commands(struct request_queue *q,
+ void (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_init_hw_commands(hctx, init, data);
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
+{
+ struct page *page;
+
+ while (!list_empty(&hctx->page_list)) {
+ page = list_first_entry(&hctx->page_list, struct page, list);
+ list_del_init(&page->list);
+ __free_pages(page, page->private);
+ }
+
+ kfree(hctx->rqs);
+
+ if (hctx->tags)
+ blk_mq_free_tags(hctx->tags);
+}
+
+static size_t order_to_size(unsigned int order)
+{
+ size_t ret = PAGE_SIZE;
+
+ while (order--)
+ ret *= 2;
+
+ return ret;
+}
+
+static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
+ unsigned int reserved_tags, int node)
+{
+ unsigned int i, j, entries_per_page, max_order = 4;
+ size_t rq_size, left;
+
+ INIT_LIST_HEAD(&hctx->page_list);
+
+ hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *),
+ GFP_KERNEL, node);
+ if (!hctx->rqs)
+ return -ENOMEM;
+
+ /*
+ * rq_size is the size of the request plus driver payload, rounded
+ * to the cacheline size
+ */
+ rq_size = round_up(sizeof(struct request) + hctx->cmd_size,
+ cache_line_size());
+ left = rq_size * hctx->queue_depth;
+
+ for (i = 0; i < hctx->queue_depth;) {
+ int this_order = max_order;
+ struct page *page;
+ int to_do;
+ void *p;
+
+ while (left < order_to_size(this_order - 1) && this_order)
+ this_order--;
+
+ do {
+ page = alloc_pages_node(node, GFP_KERNEL, this_order);
+ if (page)
+ break;
+ if (!this_order--)
+ break;
+ if (order_to_size(this_order) < rq_size)
+ break;
+ } while (1);
+
+ if (!page)
+ break;
+
+ page->private = this_order;
+ list_add_tail(&page->list, &hctx->page_list);
+
+ p = page_address(page);
+ entries_per_page = order_to_size(this_order) / rq_size;
+ to_do = min(entries_per_page, hctx->queue_depth - i);
+ left -= to_do * rq_size;
+ for (j = 0; j < to_do; j++) {
+ hctx->rqs[i] = p;
+ blk_mq_rq_init(hctx, hctx->rqs[i]);
+ p += rq_size;
+ i++;
+ }
+ }
+
+ if (i < (reserved_tags + BLK_MQ_TAG_MIN))
+ goto err_rq_map;
+ else if (i != hctx->queue_depth) {
+ hctx->queue_depth = i;
+ pr_warn("%s: queue depth set to %u because of low memory\n",
+ __func__, i);
+ }
+
+ hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node);
+ if (!hctx->tags) {
+err_rq_map:
+ blk_mq_free_rq_map(hctx);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int blk_mq_init_hw_queues(struct request_queue *q,
+ struct blk_mq_reg *reg, void *driver_data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i, j;
+
+ /*
+ * Initialize hardware queues
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ unsigned int num_maps;
+ int node;
+
+ node = hctx->numa_node;
+ if (node == NUMA_NO_NODE)
+ node = hctx->numa_node = reg->numa_node;
+
+ INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn);
+ spin_lock_init(&hctx->lock);
+ INIT_LIST_HEAD(&hctx->dispatch);
+ hctx->queue = q;
+ hctx->queue_num = i;
+ hctx->flags = reg->flags;
+ hctx->queue_depth = reg->queue_depth;
+ hctx->cmd_size = reg->cmd_size;
+
+ blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+ blk_mq_hctx_notify, hctx);
+ blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+ if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node))
+ break;
+
+ /*
+ * Allocate space for all possible cpus to avoid allocation in
+ * runtime
+ */
+ hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+ GFP_KERNEL, node);
+ if (!hctx->ctxs)
+ break;
+
+ num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG;
+ hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long),
+ GFP_KERNEL, node);
+ if (!hctx->ctx_map)
+ break;
+
+ hctx->nr_ctx_map = num_maps;
+ hctx->nr_ctx = 0;
+
+ if (reg->ops->init_hctx &&
+ reg->ops->init_hctx(hctx, driver_data, i))
+ break;
+ }
+
+ if (i == q->nr_hw_queues)
+ return 0;
+
+ /*
+ * Init failed
+ */
+ queue_for_each_hw_ctx(q, hctx, j) {
+ if (i == j)
+ break;
+
+ if (reg->ops->exit_hctx)
+ reg->ops->exit_hctx(hctx, j);
+
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ blk_mq_free_rq_map(hctx);
+ kfree(hctx->ctxs);
+ }
+
+ return 1;
+}
+
+static void blk_mq_init_cpu_queues(struct request_queue *q,
+ unsigned int nr_hw_queues)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i) {
+ struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
+ struct blk_mq_hw_ctx *hctx;
+
+ memset(__ctx, 0, sizeof(*__ctx));
+ __ctx->cpu = i;
+ spin_lock_init(&__ctx->lock);
+ INIT_LIST_HEAD(&__ctx->rq_list);
+ __ctx->queue = q;
+
+ /* If the cpu isn't online, the cpu is mapped to first hctx */
+ hctx = q->mq_ops->map_queue(q, i);
+ hctx->nr_ctx++;
+
+ if (!cpu_online(i))
+ continue;
+
+ /*
+ * Set local node, IFF we have more than one hw queue. If
+ * not, we remain on the home node of the device
+ */
+ if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+ hctx->numa_node = cpu_to_node(i);
+ }
+}
+
+static void blk_mq_map_swqueue(struct request_queue *q)
+{
+ unsigned int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx->nr_ctx = 0;
+ }
+
+ /*
+ * Map software to hardware queues
+ */
+ queue_for_each_ctx(q, ctx, i) {
+ /* If the cpu isn't online, the cpu is mapped to first hctx */
+ hctx = q->mq_ops->map_queue(q, i);
+ ctx->index_hw = hctx->nr_ctx;
+ hctx->ctxs[hctx->nr_ctx++] = ctx;
+ }
+}
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
+ void *driver_data)
+{
+ struct blk_mq_hw_ctx **hctxs;
+ struct blk_mq_ctx *ctx;
+ struct request_queue *q;
+ int i;
+
+ if (!reg->nr_hw_queues ||
+ !reg->ops->queue_rq || !reg->ops->map_queue ||
+ !reg->ops->alloc_hctx || !reg->ops->free_hctx)
+ return ERR_PTR(-EINVAL);
+
+ if (!reg->queue_depth)
+ reg->queue_depth = BLK_MQ_MAX_DEPTH;
+ else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) {
+ pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth);
+ reg->queue_depth = BLK_MQ_MAX_DEPTH;
+ }
+
+ /*
+ * Set aside a tag for flush requests. It will only be used while
+ * another flush request is in progress but outside the driver.
+ *
+ * TODO: only allocate if flushes are supported
+ */
+ reg->queue_depth++;
+ reg->reserved_tags++;
+
+ if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
+ return ERR_PTR(-EINVAL);
+
+ ctx = alloc_percpu(struct blk_mq_ctx);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
+ reg->numa_node);
+
+ if (!hctxs)
+ goto err_percpu;
+
+ for (i = 0; i < reg->nr_hw_queues; i++) {
+ hctxs[i] = reg->ops->alloc_hctx(reg, i);
+ if (!hctxs[i])
+ goto err_hctxs;
+
+ hctxs[i]->numa_node = NUMA_NO_NODE;
+ hctxs[i]->queue_num = i;
+ }
+
+ q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node);
+ if (!q)
+ goto err_hctxs;
+
+ q->mq_map = blk_mq_make_queue_map(reg);
+ if (!q->mq_map)
+ goto err_map;
+
+ setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+ blk_queue_rq_timeout(q, 30000);
+
+ q->nr_queues = nr_cpu_ids;
+ q->nr_hw_queues = reg->nr_hw_queues;
+
+ q->queue_ctx = ctx;
+ q->queue_hw_ctx = hctxs;
+
+ q->mq_ops = reg->ops;
+
+ blk_queue_make_request(q, blk_mq_make_request);
+ blk_queue_rq_timed_out(q, reg->ops->timeout);
+ if (reg->timeout)
+ blk_queue_rq_timeout(q, reg->timeout);
+
+ blk_mq_init_flush(q);
+ blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
+
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_hw;
+
+ blk_mq_map_swqueue(q);
+
+ mutex_lock(&all_q_mutex);
+ list_add_tail(&q->all_q_node, &all_q_list);
+ mutex_unlock(&all_q_mutex);
+
+ return q;
+err_hw:
+ kfree(q->mq_map);
+err_map:
+ blk_cleanup_queue(q);
+err_hctxs:
+ for (i = 0; i < reg->nr_hw_queues; i++) {
+ if (!hctxs[i])
+ break;
+ reg->ops->free_hctx(hctxs[i], i);
+ }
+ kfree(hctxs);
+err_percpu:
+ free_percpu(ctx);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+void blk_mq_free_queue(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ cancel_delayed_work_sync(&hctx->delayed_work);
+ kfree(hctx->ctx_map);
+ kfree(hctx->ctxs);
+ blk_mq_free_rq_map(hctx);
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ if (q->mq_ops->exit_hctx)
+ q->mq_ops->exit_hctx(hctx, i);
+ q->mq_ops->free_hctx(hctx, i);
+ }
+
+ free_percpu(q->queue_ctx);
+ kfree(q->queue_hw_ctx);
+ kfree(q->mq_map);
+
+ q->queue_ctx = NULL;
+ q->queue_hw_ctx = NULL;
+ q->mq_map = NULL;
+
+ mutex_lock(&all_q_mutex);
+ list_del_init(&q->all_q_node);
+ mutex_unlock(&all_q_mutex);
+}
+EXPORT_SYMBOL(blk_mq_free_queue);
+
+/* Basically redo blk_mq_init_queue with queue frozen */
+static void __cpuinit blk_mq_queue_reinit(struct request_queue *q)
+{
+ blk_mq_freeze_queue(q);
+
+ blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+
+ /*
+ * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
+ * we should change hctx numa_node according to new topology (this
+ * involves freeing and re-allocating memory; is it worth doing?)
+ */
+
+ blk_mq_map_swqueue(q);
+
+ blk_mq_unfreeze_queue(q);
+}
+
+static int __cpuinit blk_mq_queue_reinit_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ struct request_queue *q;
+
+ /*
+ * Before new mapping is established, hotadded cpu might already start
+ * handling requests. This doesn't break anything as we map offline
+ * CPUs to first hardware queue. We will re-init queue below to get
+ * optimal settings.
+ */
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
+ action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+ return NOTIFY_OK;
+
+ mutex_lock(&all_q_mutex);
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_queue_reinit(q);
+ mutex_unlock(&all_q_mutex);
+ return NOTIFY_OK;
+}
+
+static int __init blk_mq_init(void)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i)
+ init_llist_head(&per_cpu(ipi_lists, i));
+
+ blk_mq_cpu_init();
+
+ /* Must be called after percpu_counter_hotcpu_callback() */
+ hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
+
+ return 0;
+}
+subsys_initcall(blk_mq_init);
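For context, a hedged sketch of how a driver registers against this new interface, using only the reg fields, ops callbacks and exported helpers exercised above; the type name of the ops table and the trivial "complete immediately" ->queue_rq() are assumptions for illustration, not taken from the patch:

#include <linux/blk-mq.h>

/* Complete every request on the spot; a real driver would issue it to HW. */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	blk_mq_end_io(rq, 0);	/* ends up freeing the tag via blk_mq_free_request() */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {	/* type name assumed */
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,		/* default per-CPU mapping */
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
};

static struct blk_mq_reg example_reg = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,		/* blk_mq_init_queue() adds one flush tag */
	.cmd_size	= 0,		/* no per-request driver payload (pdu) */
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

/* Returns ERR_PTR() on failure, as blk_mq_init_queue() above does. */
static struct request_queue *example_init(void *driver_data)
{
	return blk_mq_init_queue(&example_reg, driver_data);
}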
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 467c8de88642..2b01c0b73984 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -36,7 +36,7 @@ static void blk_done_softirq(struct softirq_action *h)
}
}
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
static void trigger_softirq(void *data)
{
struct request *rq = data;
@@ -71,7 +71,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
return 1;
}
-#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#else /* CONFIG_SMP */
static int raise_blk_irq(int cpu, struct request *rq)
{
return 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5efc5a647183..277bd218b2c3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -287,7 +287,7 @@ static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
ssize_t ret = -EINVAL;
-#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
unsigned long val;
ret = queue_var_store(&val, page, count);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 044892e1ca74..277e348aef93 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -45,14 +45,28 @@ static bool arch_timer_use_virtual = true;
* Architected system timer support.
*/
-static inline irqreturn_t timer_handler(const int access,
+static __always_inline
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+ struct clock_event_device *clk)
+{
+ arch_timer_reg_write_cp15(access, reg, val);
+}
+
+static __always_inline
+u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
+ struct clock_event_device *clk)
+{
+ return arch_timer_reg_read_cp15(access, reg);
+}
+
+static __always_inline irqreturn_t timer_handler(const int access,
struct clock_event_device *evt)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -74,15 +88,16 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static inline void timer_set_mode(const int access, int mode)
+static __always_inline void timer_set_mode(const int access, int mode,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
break;
default:
break;
@@ -92,36 +107,37 @@ static inline void timer_set_mode(const int access, int mode)
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+ timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}
static void arch_timer_set_mode_phys(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+ timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}
-static inline void set_next_event(const int access, unsigned long evt)
+static __always_inline void set_next_event(const int access, unsigned long evt,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
- struct clock_event_device *unused)
+ struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+ set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys(unsigned long evt,
- struct clock_event_device *unused)
+ struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
+ set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 118773751ea4..a6287c6ad6c2 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/initrd.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -550,9 +551,10 @@ int __init of_flat_dt_match(unsigned long node, const char *const *compat)
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
*/
-void __init early_init_dt_check_for_initrd(unsigned long node)
+static void __init early_init_dt_check_for_initrd(unsigned long node)
{
- unsigned long start, end, len;
+ u64 start, end;
+ unsigned long len;
__be32 *prop;
pr_debug("Looking for initrd properties... ");
@@ -560,18 +562,22 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
- start = of_read_ulong(prop, len/4);
+ start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
- end = of_read_ulong(prop, len/4);
+ end = of_read_number(prop, len/4);
- early_init_dt_setup_initrd_arch(start, end);
- pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end);
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
+ (unsigned long long)start, (unsigned long long)end);
}
#else
-inline void early_init_dt_check_for_initrd(unsigned long node)
+static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -698,6 +704,62 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
return 1;
}
+#ifdef CONFIG_HAVE_MEMBLOCK
+void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ const u64 phys_offset = __pa(PAGE_OFFSET);
+ base &= PAGE_MASK;
+ size &= PAGE_MASK;
+ if (base + size < phys_offset) {
+ pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
+ if (base < phys_offset) {
+ pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, phys_offset);
+ size -= phys_offset - base;
+ base = phys_offset;
+ }
+ memblock_add(base, size);
+}
+
+/*
+ * Called from unflatten_device_tree() to bootstrap the devicetree itself.
+ * Architectures can override this definition if memblock isn't used.
+ */
+void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+#endif
+
+bool __init early_init_dt_scan(void *params)
+{
+ if (!params)
+ return false;
+
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /* Check device tree validity */
+ if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
+ initial_boot_params = NULL;
+ return false;
+ }
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ return true;
+}
+
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
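
The new early_init_dt_scan() above bundles the usual boot-time FDT steps into one call: validate the blob's magic, parse /chosen into boot_command_line, cache the {size,address}-cells, and hand every memory node to early_init_dt_add_memory_arch() (memblock_add() by default under CONFIG_HAVE_MEMBLOCK). A minimal sketch of how an architecture's early setup code might consume it; this is illustrative only, not part of the patch, and the function names are made up:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_fdt.h>

/* Early boot: "fdt" is assumed to be a virtual pointer to the DT blob. */
static void __init example_setup_machine_fdt(void *fdt)
{
	if (!early_init_dt_scan(fdt))
		panic("Invalid or missing device tree blob");
	/* Command line, cell sizes and memblock ranges are now populated. */
}

/* Later, once the allocators are up, expand the blob into device_nodes. */
static void __init example_populate_dt(void)
{
	unflatten_device_tree();
}
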
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3aac8e9edac3..b6df167050fb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1394,7 +1394,7 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
- siginfo_t *siginfo)
+ const siginfo_t *siginfo)
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 639d7a4d033b..01613b382b0e 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -46,5 +46,20 @@
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
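
The smp_store_release()/smp_load_acquire() fallbacks added above provide one-way ordering: the release orders every earlier access before the flag store, and the acquire orders the flag load before every later access. A minimal message-passing sketch built on them; the shared variables and function names are made up and not part of this patch:

#include <asm/barrier.h>

static int payload;
static int ready;

/* Producer: write the payload, then release-store the flag. */
static void example_producer(void)
{
	payload = 42;			/* ordered before the release store */
	smp_store_release(&ready, 1);
}

/* Consumer: acquire-load the flag; the payload read is ordered after it. */
static int example_consumer(void)
{
	if (smp_load_acquire(&ready))
		return payload;		/* observes 42 once ready reads as 1 */
	return -1;
}
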
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
new file mode 100644
index 000000000000..a1fc6590a743
--- /dev/null
+++ b/include/asm-generic/preempt.h
@@ -0,0 +1,54 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+ return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline int *preempt_count_ptr(void)
+{
+ return &current_thread_info()->preempt_count;
+}
+
+/*
+ * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however, the
+ * alternative is losing a reschedule. Better to schedule too often -- also this
+ * should be a very rare operation.
+ */
+static __always_inline void preempt_count_set(int pc)
+{
+ *preempt_count_ptr() = pc;
+}
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+ *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+ *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+}
+
+#endif /* __ASM_PREEMPT_H */
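
Because the NEED_RESCHED bit is stored inverted, "no preempt-disable nesting and a reschedule pending" is exactly the value zero, which is what lets a preempt_enable()-style path use a single decrement-and-test. A small user-space-style walk-through of that invariant, with a plain unsigned int standing in for the per-thread preempt_count (illustrative only, not part of this patch):

#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

static unsigned int pc = PREEMPT_NEED_RESCHED;	/* count 0, no resched needed */

static void set_need_resched(void)   { pc &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void) { pc |=  PREEMPT_NEED_RESCHED; }

int main(void)
{
	pc += 1;		/* preempt_disable()			*/
	set_need_resched();	/* e.g. an interrupt wants a resched	*/
	pc -= 1;		/* preempt_enable()...			*/
	if (pc == 0)		/* ...one test covers both conditions	*/
		printf("would call preempt_schedule()\n");
	clear_need_resched();
	return 0;
}
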
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index b685d3bd32e2..3d1a3af5cf59 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -32,6 +32,6 @@ static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
#endif
-extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
+extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index c463ce990c48..e6bc74f1119c 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -23,12 +23,22 @@
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
-#define ARCH_TIMER_REG_CTRL 0
-#define ARCH_TIMER_REG_TVAL 1
+enum arch_timer_reg {
+ ARCH_TIMER_REG_CTRL,
+ ARCH_TIMER_REG_TVAL,
+};
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
+#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
+#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
+#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
+#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4)
+#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
+#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index df7060083d85..afd3b3f211e5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -361,7 +361,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 92669cd182a6..fe7a686dfd8d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -298,6 +298,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -337,6 +342,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
+
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
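
compiletime_assert_atomic_type() exists so that primitives such as the smp_store_release()/smp_load_acquire() fallbacks refuse, at build time, any type the CPU cannot store or load in one naturally atomic access. A sketch of what passes and what trips the assertion; the struct is made up and not part of this patch:

#include <asm/barrier.h>

struct not_a_word { char bytes[12]; };		/* larger than a native word */

static int flag;

static void example(void)
{
	smp_store_release(&flag, 1);	/* ok: sizeof(int) is a native word */

	/*
	 * The following would fail to build: __native_word() is false for a
	 * 12-byte struct, so compiletime_assert_atomic_type() emits
	 * "Need native word sized stores/loads for atomicity."
	 *
	 *	struct not_a_word blob;
	 *	smp_store_release(&blob, (struct not_a_word){ 0 });
	 */
}
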
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 623325e2ff97..f4101f01286b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -154,6 +154,14 @@ static inline int irq_balancing_disabled(unsigned int irq)
return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}
+static inline int irq_is_percpu(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_PER_CPU;
+}
+
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index de2a2f2bf708..0b0be4d815dc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1261,10 +1261,11 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */
-static inline void pgtable_page_ctor(struct page *page)
+static inline bool pgtable_page_ctor(struct page *page)
{
pte_lock_init(page);
inc_zone_page_state(page, NR_PAGETABLE);
+ return true;
}
static inline void pgtable_page_dtor(struct page *page)
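
pgtable_page_ctor() returning bool lets configurations where constructing a page-table page needs its own allocation (for instance a split PTE lock) report failure, so callers are expected to check it and release the page instead of leaking it. A sketch of the pattern an architecture's pte_alloc_one() would follow; the names are illustrative, the page is returned as a plain struct page *, and none of this is taken from the patch:

#include <linux/mm.h>
#include <linux/gfp.h>

static struct page *example_pte_alloc_one(struct mm_struct *mm,
					   unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		/* ctor failed: undo the page allocation */
		__free_page(page);
		return NULL;
	}
	return page;
}
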
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index ed136ad698ce..98b340d2b14c 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -93,27 +93,18 @@ extern unsigned long of_get_flat_dt_root(void);
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
-extern void early_init_dt_check_for_initrd(unsigned long node);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
extern u64 dt_mem_next_cell(int s, __be32 **cellp);
-/*
- * If BLK_DEV_INITRD, the fdt early init code will call this function,
- * to be provided by the arch code. start and end are specified as
- * physical addresses.
- */
-#ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end);
-#endif
-
/* Early flat tree scan hooks */
extern int early_init_dt_scan_root(unsigned long node, const char *uname,
int depth, void *data);
+extern bool early_init_dt_scan(void *params);
+
/* Other Prototypes */
extern void unflatten_device_tree(void);
extern void early_init_devtree(void *);
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 27ef6b190ea6..57e890abe1f0 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -22,9 +22,12 @@
* Macro which verifies @ptr is a percpu pointer without evaluating
* @ptr. This is to be used in percpu accessors to verify that the
* input parameter is a percpu pointer.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
*/
#define __verify_pcpu_ptr(ptr) do { \
- const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
+ const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
(void)__vpp_verify; \
} while (0)
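
The "+ 0" matters because typeof() applied to a per-cpu array yields an array type, and casting to an array type is invalid C; adding zero forces the usual array-to-pointer decay so the cast target becomes a pointer. A tiny stand-alone illustration of the difference (plain C, names made up, not part of this patch):

static int counters[4];

static void example(void)
{
	/*
	 * typeof(counters) is int[4], so this cast does not compile:
	 *
	 *	const void *bad = (typeof(counters))0;
	 */

	/* counters + 0 decays to int *, so this form is well-formed: */
	const void *ok = (typeof((counters) + 0))0;
	(void)ok;
}
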
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f5d4723cdb3d..116bba0d8e29 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -6,10 +6,17 @@
* preempt_count (used for kernel preemption, interrupt count, etc.)
*/
-#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>
+/*
+ * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
+ * the other bits -- can't include that header due to inclusion hell.
+ */
+#define PREEMPT_NEED_RESCHED 0x80000000
+
+#include <asm/preempt.h>
+
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c8488763277f..868dcf0b2eb1 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -86,14 +86,10 @@ void kick_all_cpus_sync(void);
/*
* Generic and arch helpers
*/
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
generic_smp_call_function_single_interrupt
-#else
-static inline void call_function_init(void) { }
-#endif
/*
* Call a function on all processors
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 8e2b7bac4378..59c17a2d38ad 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -22,6 +22,7 @@
#define EM_PPC 20 /* PowerPC */
#define EM_PPC64 21 /* PowerPC64 */
#define EM_SPU 23 /* Cell BE SPU */
+#define EM_ARM 40 /* ARM 32 bit */
#define EM_SH 42 /* SuperH */
#define EM_SPARCV9 43 /* SPARC v9 64-bit */
#define EM_IA_64 50 /* HP/Intel IA-64 */
@@ -34,6 +35,7 @@
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
+#define EM_AARCH64 183 /* ARM 64 bit */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 94fabd534b03..2a202a846757 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
default 1000 if HZ_1000
config SCHED_HRTICK
- def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
+ def_bool HIGH_RES_TIMERS
diff --git a/kernel/signal.c b/kernel/signal.c
index 50e41075ac77..5db8e47a89ff 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2723,7 +2723,7 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
-int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
int err;
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7b72ad..5221ac677e60 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -15,7 +15,6 @@
#include "smpboot.h"
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum {
CSD_FLAG_LOCK = 0x01,
};
@@ -470,7 +469,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
return 0;
}
EXPORT_SYMBOL(smp_call_function);
-#endif /* USE_GENERIC_SMP_HELPERS */
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;