author		Hugh Dickins <hughd@google.com>	2017-10-03 20:49:04 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-01-05 15:46:35 +0100
commit		169b369f99affebb24db16f4fed58a2f8ff4060f (patch)
tree		993dc8a5fb0258c52497e1429dc02a61ca725191
parent		8018307a45a90ab2eecfd03d48b7efb31707df37 (diff)
kaiser: use ALTERNATIVE instead of x86_cr3_pcid_noflush
Now that we're playing the ALTERNATIVE game, use that more efficient
method: instead of user-mapping an extra page, and reading an extra
cacheline each time for x86_cr3_pcid_noflush.

Neel has found that __stringify(bts $X86_CR3_PCID_NOFLUSH_BIT, %rax)
is a working substitute for the "bts $63, %rax" in these ALTERNATIVEs;
but the one line with $63 in looks clearer, so let's stick with that.

Worried about what happens with an ALTERNATIVE between the jump and
jump label in another ALTERNATIVE?  I was, but have checked the
combinations in SWITCH_KERNEL_CR3_NO_STACK at entry_SYSCALL_64, and
it does a good job.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
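[To illustrate the mechanism, not part of this patch: ALTERNATIVE "old", "new", feature
emits "old" in the instruction stream (padded with NOPs as needed) and records "new" in
.altinstructions; at boot, apply_alternatives() patches "new" in when the CPU has the
named feature. A rough sketch of the effective kernel-CR3 switch with %rax as the
scratch register:

	/* X86_FEATURE_PCID clear: the empty alternative is left as NOPs */
	movq	%cr3, %rax
	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
	/* (nops) */
	movq	%rax, %cr3

	/* X86_FEATURE_PCID set: apply_alternatives() patched in the bts */
	movq	%cr3, %rax
	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
	bts	$63, %rax		/* X86_CR3_PCID_NOFLUSH is bit 63 */
	movq	%rax, %cr3

Either way there is no load from x86_cr3_pcid_noflush, which is why that variable
and its user mapping can be removed below.]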
-rw-r--r--	arch/x86/entry/entry_64.S	7
-rw-r--r--	arch/x86/include/asm/kaiser.h	6
-rw-r--r--	arch/x86/mm/kaiser.c	11
3 files changed, 8 insertions(+), 16 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4e5c41005dd0..f273fed682aa 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1084,7 +1084,8 @@ ENTRY(paranoid_entry)
jz 2f
orl $2, %ebx
andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
- orq x86_cr3_pcid_noflush, %rax
+ /* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */
+ ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
movq %rax, %cr3
2:
#endif
@@ -1344,7 +1345,7 @@ ENTRY(nmi)
/* %rax is saved above, so OK to clobber here */
ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
/* If PCID enabled, NOFLUSH now and NOFLUSH on return */
- orq x86_cr3_pcid_noflush, %rax
+ ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
pushq %rax
/* mask off "user" bit of pgd address and 12 PCID bits: */
andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
@@ -1588,7 +1589,7 @@ end_repeat_nmi:
/* %rax is saved above, so OK to clobber here */
ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
/* If PCID enabled, NOFLUSH now and NOFLUSH on return */
- orq x86_cr3_pcid_noflush, %rax
+ ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
pushq %rax
/* mask off "user" bit of pgd address and 12 PCID bits: */
andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
diff --git a/arch/x86/include/asm/kaiser.h b/arch/x86/include/asm/kaiser.h
index 96643a9c194c..906150d6094e 100644
--- a/arch/x86/include/asm/kaiser.h
+++ b/arch/x86/include/asm/kaiser.h
@@ -25,7 +25,8 @@
.macro _SWITCH_TO_KERNEL_CR3 reg
movq %cr3, \reg
andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), \reg
-orq x86_cr3_pcid_noflush, \reg
+/* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */
+ALTERNATIVE "", "bts $63, \reg", X86_FEATURE_PCID
movq \reg, %cr3
.endm
@@ -39,7 +40,7 @@ movq \reg, %cr3
movq %cr3, \reg
orq PER_CPU_VAR(x86_cr3_pcid_user), \reg
js 9f
-/* FLUSH this time, reset to NOFLUSH for next time (if PCID enabled) */
+/* If PCID enabled, FLUSH this time, reset to NOFLUSH for next time */
movb \regb, PER_CPU_VAR(x86_cr3_pcid_user+7)
9:
movq \reg, %cr3
@@ -90,7 +91,6 @@ movq PER_CPU_VAR(unsafe_stack_register_backup), %rax
*/
DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
-extern unsigned long x86_cr3_pcid_noflush;
DECLARE_PER_CPU(unsigned long, x86_cr3_pcid_user);
extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 1840aa0a052d..b8aa9ad59c0f 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -31,7 +31,6 @@ DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
* This is also handy because systems that do not support PCIDs
* just end up or'ing a 0 into their CR3, which does no harm.
*/
-unsigned long x86_cr3_pcid_noflush __read_mostly;
DEFINE_PER_CPU(unsigned long, x86_cr3_pcid_user);
/*
@@ -356,10 +355,6 @@ void __init kaiser_init(void)
kaiser_add_user_map_early(&debug_idt_table,
sizeof(gate_desc) * NR_VECTORS,
__PAGE_KERNEL);
-
- kaiser_add_user_map_early(&x86_cr3_pcid_noflush,
- sizeof(x86_cr3_pcid_noflush),
- __PAGE_KERNEL);
}
/* Add a mapping to the shadow mapping, and synchronize the mappings */
@@ -433,18 +428,14 @@ pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
void kaiser_setup_pcid(void)
{
- unsigned long kern_cr3 = 0;
unsigned long user_cr3 = KAISER_SHADOW_PGD_OFFSET;
- if (this_cpu_has(X86_FEATURE_PCID)) {
- kern_cr3 |= X86_CR3_PCID_KERN_NOFLUSH;
+ if (this_cpu_has(X86_FEATURE_PCID))
user_cr3 |= X86_CR3_PCID_USER_NOFLUSH;
- }
/*
* These variables are used by the entry/exit
* code to change PCID and pgd and TLB flushing.
*/
- x86_cr3_pcid_noflush = kern_cr3;
this_cpu_write(x86_cr3_pcid_user, user_cr3);
}
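[For reference, kaiser_setup_pcid() as it reads once this hunk is applied, pieced
together from the context and '+' lines above; whitespace approximated:

void kaiser_setup_pcid(void)
{
	unsigned long user_cr3 = KAISER_SHADOW_PGD_OFFSET;

	if (this_cpu_has(X86_FEATURE_PCID))
		user_cr3 |= X86_CR3_PCID_USER_NOFLUSH;
	/*
	 * These variables are used by the entry/exit
	 * code to change PCID and pgd and TLB flushing.
	 */
	this_cpu_write(x86_cr3_pcid_user, user_cr3);
}
]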