author		Jan Kiszka <jan.kiszka@siemens.com>	2014-10-08 18:05:39 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-11-02 07:55:46 +0100
commit		282da870f4b5868efadcd4dc2a5a738d6fdf65d4
tree		4e6885dbd2de31f0a7eabb7bea916af9088a3dd5
parent		7e46dddd6f6cd5dbf3c7bd04a7e75d19475ac9f2
KVM: nVMX: Disable preemption while reading from shadow VMCS
In order to access the shadow VMCS, we need to load it. At this point, vmx->loaded_vmcs->vmcs and the actually loaded one start to differ. If we now get preempted by Linux, vmx_vcpu_put and, on return, vmx_vcpu_load will work against the wrong vmcs. That can cause copy_shadow_to_vmcs12 to corrupt the vmcs12 state.

Fix the issue by disabling preemption during the copy operation. copy_vmcs12_to_shadow is safe from this issue as it is executed by vmx_vcpu_run when preemption is already disabled before vmentry.

This bug is exposed by running Jailhouse within KVM on CPUs with shadow VMCS support. Jailhouse never expects an interrupt pending vmexit, but the bug can cause it if, after copy_shadow_to_vmcs12 is preempted, the active VMCS happens to have the virtual interrupt pending flag set in the CPU-based execution controls.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
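For context, a sketch of the overall shape of the patched function (a paraphrase, not the verbatim source: the per-field copy loop is elided, and the shadow_vmcs local is assumed to come from vmx->nested.current_shadow_vmcs as in the vmx.c of this era):

static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;

	preempt_disable();	/* from here on, vmx_vcpu_put()/vmx_vcpu_load()
				 * cannot run against the wrong VMCS */

	vmcs_load(shadow_vmcs);	/* the CPU's current VMCS now differs from
				 * vmx->loaded_vmcs->vmcs */

	/* ... read each shadowed field and write it into vmcs12 ... */

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);	/* restore the VMCS the rest of
						 * KVM believes is loaded */

	preempt_enable();	/* safe to be scheduled out again */
}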
-rw-r--r--	arch/x86/kvm/vmx.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a8b76c4c95e2..a0f78dbaabee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6426,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
+	preempt_disable();
+
 	vmcs_load(shadow_vmcs);
 
 	for (i = 0; i < num_fields; i++) {
@@ -6449,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 
 	vmcs_clear(shadow_vmcs);
 	vmcs_load(vmx->loaded_vmcs->vmcs);
+
+	preempt_enable();
 }
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)