xen: add XSA-278 fix
parent 9dd2a33c5b
commit 2414a64198

Notes (svn2git, 2021-03-31 03:12:20 +00:00):
    svn path=/head/; revision=483559
2 changed files with 329 additions and 1 deletion

emulators/xen-kernel411/Makefile

@@ -2,7 +2,7 @@
 
 PORTNAME=	xen
 PORTVERSION=	4.11.0
-PORTREVISION=	1
+PORTREVISION=	2
 CATEGORIES=	emulators
 MASTER_SITES=	http://downloads.xenproject.org/release/xen/${PORTVERSION}/
 PKGNAMESUFFIX=	-kernel411
@@ -90,6 +90,8 @@ EXTRA_PATCHES+=	${FILESDIR}/0001-xen-Port-the-array_index_nospec-infrastructure-
 		${FILESDIR}/0039-x86-spec-ctrl-Introduce-an-option-to-control-L1D_FLU.patch:-p1 \
 		${FILESDIR}/0040-x86-Make-spec-ctrl-no-a-global-disable-of-all-mitiga.patch:-p1 \
 		${FILESDIR}/0042-x86-write-to-correct-variable-in-parse_pv_l1tf.patch:-p1
+# XSA-278: x86: Nested VT-x usable even when disabled
+EXTRA_PATCHES+=	${FILESDIR}/xsa278-4.11.patch:-p1
 
 .include <bsd.port.options.mk>
 
emulators/xen-kernel411/files/xsa278-4.11.patch (new file, 326 lines)

@@ -0,0 +1,326 @@
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/vvmx: Disallow the use of VT-x instructions when nested virt is disabled

c/s ac6a4500b "vvmx: set vmxon_region_pa of vcpu out of VMX operation to an
invalid address" was a real bugfix as described, but has a very subtle bug
which results in all VT-x instructions being usable by a guest.

The toolstack constructs a guest by issuing:

  XEN_DOMCTL_createdomain
  XEN_DOMCTL_max_vcpus

and optionally later, HVMOP_set_param to enable nested virt.

As a result, the call to nvmx_vcpu_initialise() in hvm_vcpu_initialise()
(which is what makes the above patch look correct during review) is actually
dead code.  In practice, nvmx_vcpu_initialise() first gets called when nested
virt is enabled, which is typically never.

As a result, the zeroed memory of struct vcpu causes nvmx_vcpu_in_vmx() to
return true before nested virt is enabled for the guest.

Fixing the order of initialisation is a work in progress for other reasons,
but not viable for security backports.

A compounding factor is that the vmexit handlers for all instructions, other
than VMXON, pass 0 into vmx_inst_check_privilege()'s vmxop_check parameter,
which skips the CR4.VMXE check.  (This is one of many reasons why nested virt
isn't a supported feature yet.)

However, the overall result is that when nested virt is not enabled by the
toolstack (i.e. the default configuration for all production guests), the VT-x
instructions (other than VMXON) are actually usable, and Xen very quickly
falls over the fact that the nvmx structure is uninitialised.

In order to fail safe in the supported case, re-implement all the VT-x
instruction handling using a single function with a common prologue, covering
all the checks which should cause #UD or #GP faults.  This deliberately
doesn't use any state from the nvmx structure, in case there are other lurking
issues.

This is XSA-278

Reported-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Sergey Dyasli <sergey.dyasli@citrix.com>

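A minimal sketch (not part of the upstream patch) of the failure mode described above: vmxon_region_pa comes from the cited changeset's title, while INVALID_PADDR and the body of nvmx_vcpu_in_vmx() below are assumptions for illustration only.

/*
 * Sketch: nvmx_vcpu_initialise() is what would mark the vcpu as being
 * outside VMX operation, but it only runs once nested virt is enabled via
 * HVMOP_set_param -- which the toolstack normally never does.  A default
 * guest therefore keeps the zeroed struct vcpu, and 0 is not the "invalid
 * address" sentinel, so the check below wrongly reports "in VMX operation".
 */
static bool_t nvmx_vcpu_in_vmx(const struct vcpu *v)
{
    return vcpu_2_nvmx(v).vmxon_region_pa != INVALID_PADDR;
}

Together with the vmxop_check == 0 path that skips the CR4.VMXE test for everything other than VMXON, this is how the remaining VT-x instructions reach the uninitialised nvmx state; the common prologue added in the diff below performs its #UD/#GP checks without consulting that state at all.
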
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index a6415f0..a4d2829 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3982,57 +3982,17 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_VMXOFF:
-        if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMXON:
-        if ( nvmx_handle_vmxon(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMCLEAR:
-        if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMPTRLD:
-        if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMPTRST:
-        if ( nvmx_handle_vmptrst(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMREAD:
-        if ( nvmx_handle_vmread(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMWRITE:
-        if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMLAUNCH:
-        if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMRESUME:
-        if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_INVEPT:
-        if ( nvmx_handle_invept(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_INVVPID:
-        if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
+        if ( nvmx_handle_vmx_insn(regs, exit_reason) == X86EMUL_OKAY )
             update_guest_eip();
         break;
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e97db33..88cb58c 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1470,7 +1470,7 @@ void nvmx_switch_guest(void)
  * VMX instructions handling
  */
 
-int nvmx_handle_vmxon(struct cpu_user_regs *regs)
+static int nvmx_handle_vmxon(struct cpu_user_regs *regs)
 {
     struct vcpu *v=current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -1522,7 +1522,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
+static int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
 {
     struct vcpu *v=current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -1611,7 +1611,7 @@ static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_vmresume(struct cpu_user_regs *regs)
+static int nvmx_handle_vmresume(struct cpu_user_regs *regs)
 {
     bool_t launched;
     struct vcpu *v = current;
@@ -1645,7 +1645,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
     return nvmx_vmresume(v,regs);
 }
 
-int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
 {
     bool_t launched;
     struct vcpu *v = current;
@@ -1688,7 +1688,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     return rc;
 }
 
-int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
+static int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
@@ -1759,7 +1759,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
+static int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
@@ -1784,7 +1784,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_vmclear(struct cpu_user_regs *regs)
+static int nvmx_handle_vmclear(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
@@ -1836,7 +1836,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_vmread(struct cpu_user_regs *regs)
+static int nvmx_handle_vmread(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
@@ -1878,7 +1878,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
+static int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
@@ -1926,7 +1926,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_invept(struct cpu_user_regs *regs)
+static int nvmx_handle_invept(struct cpu_user_regs *regs)
 {
     struct vmx_inst_decoded decode;
     unsigned long eptp;
@@ -1954,7 +1954,7 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
-int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+static int nvmx_handle_invvpid(struct cpu_user_regs *regs)
 {
     struct vmx_inst_decoded decode;
     unsigned long vpid;
@@ -1980,6 +1980,81 @@ int nvmx_handle_invvpid(struct cpu_user_regs *regs)
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason)
+{
+    struct vcpu *curr = current;
+    int ret;
+
+    if ( !(curr->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) ||
+         !nestedhvm_enabled(curr->domain) ||
+         (vmx_guest_x86_mode(curr) < (hvm_long_mode_active(curr) ? 8 : 2)) )
+    {
+        hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+        return X86EMUL_EXCEPTION;
+    }
+
+    if ( vmx_get_cpl() > 0 )
+    {
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        return X86EMUL_EXCEPTION;
+    }
+
+    switch ( exit_reason )
+    {
+    case EXIT_REASON_VMXOFF:
+        ret = nvmx_handle_vmxoff(regs);
+        break;
+
+    case EXIT_REASON_VMXON:
+        ret = nvmx_handle_vmxon(regs);
+        break;
+
+    case EXIT_REASON_VMCLEAR:
+        ret = nvmx_handle_vmclear(regs);
+        break;
+
+    case EXIT_REASON_VMPTRLD:
+        ret = nvmx_handle_vmptrld(regs);
+        break;
+
+    case EXIT_REASON_VMPTRST:
+        ret = nvmx_handle_vmptrst(regs);
+        break;
+
+    case EXIT_REASON_VMREAD:
+        ret = nvmx_handle_vmread(regs);
+        break;
+
+    case EXIT_REASON_VMWRITE:
+        ret = nvmx_handle_vmwrite(regs);
+        break;
+
+    case EXIT_REASON_VMLAUNCH:
+        ret = nvmx_handle_vmlaunch(regs);
+        break;
+
+    case EXIT_REASON_VMRESUME:
+        ret = nvmx_handle_vmresume(regs);
+        break;
+
+    case EXIT_REASON_INVEPT:
+        ret = nvmx_handle_invept(regs);
+        break;
+
+    case EXIT_REASON_INVVPID:
+        ret = nvmx_handle_invvpid(regs);
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+        domain_crash(curr->domain);
+        ret = X86EMUL_UNHANDLEABLE;
+        break;
+    }
+
+    return ret;
+}
+
 #define __emul_value(enable1, default1) \
     ((enable1 | default1) << 32 | (default1))
 
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 9ea35eb..fc4a8d1 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -94,9 +94,6 @@ void nvmx_domain_relinquish_resources(struct domain *d);
 
 bool_t nvmx_ept_enabled(struct vcpu *v);
 
-int nvmx_handle_vmxon(struct cpu_user_regs *regs);
-int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
-
 #define EPT_TRANSLATE_SUCCEED 0
 #define EPT_TRANSLATE_VIOLATION 1
 #define EPT_TRANSLATE_MISCONFIG 2
@@ -191,15 +188,7 @@ enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
 uint64_t get_shadow_eptp(struct vcpu *v);
 
 void nvmx_destroy_vmcs(struct vcpu *v);
-int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
-int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
-int nvmx_handle_vmclear(struct cpu_user_regs *regs);
-int nvmx_handle_vmread(struct cpu_user_regs *regs);
-int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
-int nvmx_handle_vmresume(struct cpu_user_regs *regs);
-int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
-int nvmx_handle_invept(struct cpu_user_regs *regs);
-int nvmx_handle_invvpid(struct cpu_user_regs *regs);
+int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason);
 int nvmx_msr_read_intercept(unsigned int msr,
                             u64 *msr_content);