From 2b4d1606aac27f2485061abd953ea1e103b5e26e Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Sun, 3 Dec 2017 17:36:55 +0000
Subject: arm64: KVM: Dynamically patch the kernel/hyp VA mask

So far, we have been using a complicated sequence of alternatives to
patch the kernel/hyp VA mask on non-VHE, and to NOP out the masking
altogether on VHE.

The newly introduced dynamic patching gives us the opportunity to
simplify that code by patching a single instruction with the correct
mask (instead of the mind-bending cumulative masking we have at the
moment), or even a single NOP on VHE. This also adds some initial
code that will allow the patching callback to switch to a more
complex patching scheme.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/va_layout.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)
 create mode 100644 arch/arm64/kvm/va_layout.c

(limited to 'arch/arm64/kvm/va_layout.c')

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
new file mode 100644
index 000000000000..0d7bf8319894
--- /dev/null
+++ b/arch/arm64/kvm/va_layout.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/alternative.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
+#include <asm/kvm_mmu.h>
+
+#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
+#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
+
+static u64 va_mask;
+
+static void compute_layout(void)
+{
+	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
+	unsigned long mask = HYP_PAGE_OFFSET_HIGH_MASK;
+
+	/*
+	 * Activate the lower HYP offset only if the idmap doesn't
+	 * clash with it,
+	 */
+	if (idmap_addr > HYP_PAGE_OFFSET_LOW_MASK)
+		mask = HYP_PAGE_OFFSET_LOW_MASK;
+
+	va_mask = mask;
+}
+
+static u32 compute_instruction(int n, u32 rd, u32 rn)
+{
+	u32 insn = AARCH64_BREAK_FAULT;
+
+	switch (n) {
+	case 0:
+		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
+							  AARCH64_INSN_VARIANT_64BIT,
+							  rn, rd, va_mask);
+		break;
+	}
+
+	return insn;
+}
+
+void __init kvm_update_va_mask(struct alt_instr *alt,
+			       __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	int i;
+
+	/* We only expect a single instruction in the alternative sequence */
+	BUG_ON(nr_inst != 1);
+
+	if (!has_vhe() && !va_mask)
+		compute_layout();
+
+	for (i = 0; i < nr_inst; i++) {
+		u32 rd, rn, insn, oinsn;
+
+		/*
+		 * VHE doesn't need any address translation, let's NOP
+		 * everything.
+		 */
+		if (has_vhe()) {
+			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+			continue;
+		}
+
+		oinsn = le32_to_cpu(origptr[i]);
+		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);
+
+		insn = compute_instruction(i, rd, rn);
+		BUG_ON(insn == AARCH64_BREAK_FAULT);
+
+		updptr[i] = cpu_to_le32(insn);
+	}
+}
--
cgit v1.2.3
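To make the above concrete: after this patch, each kern_hyp_va() site
contains a single AND whose immediate is rewritten at boot by
kvm_update_va_mask(). The mask selection itself can be modelled with a
few lines of standalone C (illustrative only; VA_BITS and the idmap
address below are assumed values, not taken from a real system):

  #include <stdint.h>
  #include <stdio.h>

  #define VA_BITS 48	/* assumed kernel configuration */

  /* Mirrors compute_layout(): use the lower HYP offset only if
   * the idmap doesn't clash with it. */
  static uint64_t model_va_mask(uint64_t idmap_addr)
  {
  	uint64_t high_mask = (UINT64_C(1) << VA_BITS) - 1;
  	uint64_t low_mask  = (UINT64_C(1) << (VA_BITS - 1)) - 1;

  	return (idmap_addr > low_mask) ? low_mask : high_mask;
  }

  int main(void)
  {
  	/* hypothetical idmap address in the lower half of the VA space */
  	printf("mask: %016llx\n",
  	       (unsigned long long)model_va_mask(UINT64_C(0x80000000)));
  	return 0;
  }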
From 005e975a3bd08dce8d77746d6688cac615fe7c97 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Fri, 8 Dec 2017 14:18:27 +0000
Subject: arm64: KVM: Dynamically compute the HYP VA mask

As we're moving towards a much more dynamic way to compute our
HYP VA, let's express the mask in a slightly different way.

Instead of comparing the idmap position to the "low" VA mask, we
directly compute the mask by taking into account the idmap's
(VA_BITS - 1) bit.

No functional change.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/va_layout.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

(limited to 'arch/arm64/kvm/va_layout.c')

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 0d7bf8319894..7998d1a60916 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -21,24 +21,19 @@
 #include <asm/insn.h>
 #include <asm/kvm_mmu.h>
 
-#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
-#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
-
 static u64 va_mask;
 
 static void compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
-	unsigned long mask = HYP_PAGE_OFFSET_HIGH_MASK;
+	u64 hyp_va_msb;
 
-	/*
-	 * Activate the lower HYP offset only if the idmap doesn't
-	 * clash with it,
-	 */
-	if (idmap_addr > HYP_PAGE_OFFSET_LOW_MASK)
-		mask = HYP_PAGE_OFFSET_LOW_MASK;
+	/* Where is my RAM region? */
+	hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
+	hyp_va_msb ^= BIT(VA_BITS - 1);
 
-	va_mask = mask;
+	va_mask = GENMASK_ULL(VA_BITS - 2, 0);
+	va_mask |= hyp_va_msb;
 }
 
 static u32 compute_instruction(int n, u32 rd, u32 rn)
--
cgit v1.2.3
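The new computation is easy to check in isolation. Below is a
standalone C model of the updated compute_layout() (again illustrative;
VA_BITS and the idmap addresses are assumed). Flipping the idmap's
(VA_BITS - 1) bit places HYP in whichever half of the VA space the
idmap does not occupy, reproducing the old HIGH/LOW masks exactly:

  #include <stdint.h>
  #include <stdio.h>

  #define VA_BITS  48	/* assumed kernel configuration */
  #define BIT64(n) (UINT64_C(1) << (n))

  static uint64_t model_va_mask(uint64_t idmap_addr)
  {
  	/* Take the idmap's (VA_BITS - 1) bit and flip it */
  	uint64_t hyp_va_msb = (idmap_addr & BIT64(VA_BITS - 1)) ^
  			      BIT64(VA_BITS - 1);

  	/* bits [VA_BITS-2:0], plus the flipped region bit */
  	return (BIT64(VA_BITS - 1) - 1) | hyp_va_msb;
  }

  int main(void)
  {
  	/* idmap in the lower half -> old HIGH mask */
  	printf("%016llx\n",
  	       (unsigned long long)model_va_mask(BIT64(30)));
  	/* idmap in the upper half -> old LOW mask */
  	printf("%016llx\n",
  	       (unsigned long long)model_va_mask(BIT64(VA_BITS - 1)));
  	return 0;
  }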
From ed57cac83e05f2e93567e4b5c57ee58a1bf8a582 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Sun, 3 Dec 2017 18:22:49 +0000
Subject: arm64: KVM: Introduce EL2 VA randomisation

The main idea behind randomising the EL2 VA is that we usually have
a few spare bits between the most significant bit of the VA mask
and the most significant bit of the linear mapping.

Those bits could be a bunch of zeroes, and could be useful to move
things around a bit. Of course, the more memory you have, the less
randomisation you get...

Alternatively, these bits could be the result of KASLR, in which
case they are already random. But it would be nice to have a
*different* randomisation, just to make the job of a potential
attacker a bit more difficult.

Inserting these random bits is a bit involved. We don't have a spare
register (short of rewriting all the kern_hyp_va call sites), and
the immediate we want to insert is too random to be used with the
ORR instruction. The best option I could come up with is the
following sequence:

  and x0, x0, #va_mask
  ror x0, x0, #first_random_bit
  add x0, x0, #(random & 0xfff)
  add x0, x0, #(random >> 12), lsl #12
  ror x0, x0, #(64 - first_random_bit)

making it a fairly long sequence, but one that a decent CPU should
be able to execute without breaking a sweat. It is of course NOPed
out on VHE. The last 4 instructions can also be turned into NOPs
if it appears that there are no free bits to use.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/va_layout.c | 76 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 71 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/kvm/va_layout.c')

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 7998d1a60916..3d41a480b6a5 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -16,24 +16,60 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/memblock.h>
+#include <linux/random.h>
 #include <asm/alternative.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/kvm_mmu.h>
 
+/*
+ * The LSB of the random hyp VA tag or 0 if no randomization is used.
+ */
+static u8 tag_lsb;
+/*
+ * The random hyp VA tag value with the region bit if hyp randomization is used
+ */
+static u64 tag_val;
 static u64 va_mask;
 
 static void compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
+	int kva_msb;
 
 	/* Where is my RAM region? */
 	hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
 	hyp_va_msb ^= BIT(VA_BITS - 1);
 
-	va_mask = GENMASK_ULL(VA_BITS - 2, 0);
-	va_mask |= hyp_va_msb;
+	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
+			(u64)(high_memory - 1));
+
+	if (kva_msb == (VA_BITS - 1)) {
+		/*
+		 * No space in the address, let's compute the mask so
+		 * that it covers (VA_BITS - 1) bits, and the region
+		 * bit. The tag stays set to zero.
+		 */
+		va_mask = BIT(VA_BITS - 1) - 1;
+		va_mask |= hyp_va_msb;
+	} else {
+		/*
+		 * We do have some free bits to insert a random tag.
+		 * Hyp VAs are now created from kernel linear map VAs
+		 * using the following formula (with V == VA_BITS):
+		 *
+		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
+		 *  ---------------------------------------------------------
+		 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
+		 */
+		tag_lsb = kva_msb;
+		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
+		tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
+		tag_val |= hyp_va_msb;
+		tag_val >>= tag_lsb;
+	}
 }
 
 static u32 compute_instruction(int n, u32 rd, u32 rn)
@@ -46,6 +82,33 @@ static u32 compute_instruction(int n, u32 rd, u32 rn)
 						  AARCH64_INSN_VARIANT_64BIT,
 						  rn, rd, va_mask);
 		break;
+
+	case 1:
+		/* ROR is a variant of EXTR with Rm = Rn */
+		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
+					     rn, rn, rd,
+					     tag_lsb);
+		break;
+
+	case 2:
+		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
+						    tag_val & GENMASK(11, 0),
+						    AARCH64_INSN_VARIANT_64BIT,
+						    AARCH64_INSN_ADSB_ADD);
+		break;
+
+	case 3:
+		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
+						    tag_val & GENMASK(23, 12),
+						    AARCH64_INSN_VARIANT_64BIT,
+						    AARCH64_INSN_ADSB_ADD);
+		break;
+
+	case 4:
+		/* ROR is a variant of EXTR with Rm = Rn */
+		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
+					     rn, rn, rd, 64 - tag_lsb);
+		break;
 	}
 
 	return insn;
@@ -56,8 +119,7 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 {
 	int i;
 
-	/* We only expect a single instruction in the alternative sequence */
-	BUG_ON(nr_inst != 1);
+	BUG_ON(nr_inst != 5);
 
 	if (!has_vhe() && !va_mask)
 		compute_layout();
@@ -68,8 +130,12 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 		/*
 		 * VHE doesn't need any address translation, let's NOP
 		 * everything.
+		 *
+		 * Alternatively, if we don't have any spare bits in
+		 * the address, NOP everything after masking that
+		 * kernel VA.
 		 */
-		if (has_vhe()) {
+		if (has_vhe() || (!tag_lsb && i > 0)) {
 			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
 			continue;
 		}
--
cgit v1.2.3
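The effect of the patched and/ror/add/add/ror sequence is easier to
see in C. The sketch below is illustrative only: the layout values
are assumed, and ror64() stands in for the ROR instruction (valid for
rotations of 1..63). The two rotations bracket the tag insertion so
that the random tag and region bit land in bits [VA_BITS-1:tag_lsb]
while the low bits of the kernel VA are preserved:

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t ror64(uint64_t v, unsigned int n)
  {
  	return (v >> n) | (v << (64 - n));	/* n must be 1..63 */
  }

  /* tag_val is already shifted right by tag_lsb, as in
   * compute_layout(); its low 24 bits are what the two ADDs carry. */
  static uint64_t model_hyp_va(uint64_t va, uint64_t va_mask,
  			     uint64_t tag_val, unsigned int tag_lsb)
  {
  	va &= va_mask;			/* and x0, x0, #va_mask        */
  	va = ror64(va, tag_lsb);	/* ror x0, x0, #tag_lsb        */
  	va += tag_val & 0xfff;		/* add x0, x0, #(tag & 0xfff)  */
  	va += tag_val & 0xfff000;	/* add ..., lsl #12            */
  	return ror64(va, 64 - tag_lsb);	/* ror x0, x0, #(64 - tag_lsb) */
  }

  int main(void)
  {
  	/* hypothetical layout: VA_BITS = 48, tag_lsb = 38 */
  	unsigned int tag_lsb = 38;
  	uint64_t va_mask = (UINT64_C(1) << tag_lsb) - 1;
  	uint64_t tag_val = UINT64_C(0x2ab);	/* region bit + random tag */

  	printf("%016llx\n", (unsigned long long)
  	       model_hyp_va(UINT64_C(0xffff80002abcd000),
  			    va_mask, tag_val, tag_lsb));
  	return 0;
  }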
From 71dcb8be6d29cffff3f4a4463232f38786e97797 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 27 Feb 2018 17:38:08 +0000
Subject: arm64: KVM: Allow far branches from vector slots to the main vectors

So far, the branch from the vector slots to the main vectors can be
at most 4GB away (the reach of ADRP), and this distance is known at
compile time. If we were to remap the slots to an unrelated VA,
things would break badly.

A way to achieve VA independence would be to load the absolute
address of the vectors (__kvm_hyp_vector), either using a constant
pool or a series of movs, followed by an indirect branch.

This patch implements the latter solution, using another instance of
a patching callback. Note that since we have to save a register pair
on the stack, we branch to the *second* instruction in the vectors
in order to compensate for it. This also results in having to adjust
this balance in the invalid vector entry point.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/va_layout.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

(limited to 'arch/arm64/kvm/va_layout.c')

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 3d41a480b6a5..2deb6e9874c9 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -150,3 +150,75 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 		updptr[i] = cpu_to_le32(insn);
 	}
 }
+
+void kvm_patch_vector_branch(struct alt_instr *alt,
+			     __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u64 addr;
+	u32 insn;
+
+	BUG_ON(nr_inst != 5);
+
+	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
+		return;
+	}
+
+	if (!va_mask)
+		compute_layout();
+
+	/*
+	 * Compute HYP VA by using the same computation as kern_hyp_va()
+	 */
+	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
+	addr &= va_mask;
+	addr |= tag_val << tag_lsb;
+
+	/* Use PC[10:7] to branch to the same vector in KVM */
+	addr |= ((u64)origptr & GENMASK_ULL(10, 7));
+
+	/*
+	 * Branch to the second instruction in the vectors in order to
+	 * avoid the initial store on the stack (which we already
+	 * perform in the hardening vectors).
+	 */
+	addr += AARCH64_INSN_SIZE;
+
+	/* stp x0, x1, [sp, #-16]! */
+	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
+						AARCH64_INSN_REG_1,
+						AARCH64_INSN_REG_SP,
+						-16,
+						AARCH64_INSN_VARIANT_64BIT,
+						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movz x0, #(addr & 0xffff) */
+	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
+					 (u16)addr,
+					 0,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_ZERO);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
+	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
+					 (u16)(addr >> 16),
+					 16,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
+	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
+					 (u16)(addr >> 32),
+					 32,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* br x0 */
+	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
+					   AARCH64_INSN_BRANCH_NOLINK);
+	*updptr++ = cpu_to_le32(insn);
+}
--
cgit v1.2.3
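For reference, the five patched instructions form a stp/movz/movk/
movk/br sequence. Since hyp VAs here fit in 48 bits, bits [63:48] are
zero and three move-wides are enough. A standalone C snippet (with a
made-up target address) can print the decomposition:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	/* hypothetical hyp VA of the vector slot target */
  	uint64_t addr = UINT64_C(0x0000a2c48001f804);

  	printf("stp  x0, x1, [sp, #-16]!\n");
  	printf("movz x0, #0x%04x\n", (unsigned)(uint16_t)addr);
  	printf("movk x0, #0x%04x, lsl #16\n",
  	       (unsigned)(uint16_t)(addr >> 16));
  	printf("movk x0, #0x%04x, lsl #32\n",
  	       (unsigned)(uint16_t)(addr >> 32));
  	printf("br   x0\n");
  	return 0;
  }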
From dee39247dc75465a24990cb1772c6aaced5fd910 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Thu, 15 Feb 2018 11:47:14 +0000
Subject: arm64: KVM: Allow mapping of vectors outside of the RAM region

We're now ready to map our vectors in weird and wonderful locations.
On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR,
and gets mapped outside of the normal RAM region, next to the idmap.

That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
of the rest of the hypervisor code.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/va_layout.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm64/kvm/va_layout.c')

diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 2deb6e9874c9..c712a7376bc1 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -151,6 +151,9 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 	}
 }
 
+void *__kvm_bp_vect_base;
+int __kvm_harden_el2_vector_slot;
+
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {
--
cgit v1.2.3