author:    Paolo Bonzini <pbonzini@redhat.com>  2015-09-22 21:02:14 (GMT)
committer: Paolo Bonzini <pbonzini@redhat.com>  2015-09-25 08:31:28 (GMT)
commit:    6fec21449a62702a582cecbb0b351363e039c95e
tree:      ef62742a58747275e7d852dec6fdc8526a4f14fd /arch
parent:    79a8059d244e99454e474902e4325ee9b50e9178
KVM: x86: use correct page table format to check nested page table reserved bits
Intel CPUID on AMD host or vice versa is a weird case, but it can happen.
Handle it by checking the host CPU vendor instead of the guest's in
reset_tdp_shadow_zero_bits_mask.  For speed, the check uses the fact that
Intel EPT has an X (executable) bit while AMD NPT has NX.

Reported-by: Borislav Petkov <bp@alien8.de>
Tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
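For context on why a zero shadow_x_mask identifies an AMD host: the PTE
masks are installed once at setup time via kvm_mmu_set_mask_ptes().  The
sketch below is illustrative of the 4.3-era setup paths (kvm_arch_init()
in arch/x86/kvm/x86.c and hardware_setup() in arch/x86/kvm/vmx.c); it is
not code from this patch.

	/*
	 * Common setup uses the legacy/NPT page table layout, which
	 * has an NX bit but no X bit:
	 */
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			      PT_DIRTY_MASK, PT64_NX_MASK, 0);

	/*
	 * When VMX enables EPT, the masks are overridden with the EPT
	 * layout, which has an X bit but no NX bit:
	 */
	kvm_mmu_set_mask_ptes(0ull,
			      enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
			      enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
			      0ull, VMX_EPT_EXECUTABLE_MASK);

	/* Hence, once TDP is enabled, shadow_x_mask == 0 implies AMD NPT. */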
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
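For reference when reading the hunks below: rsvd_bits(s, e), as defined
in arch/x86/kvm/mmu.h at the time, builds a mask of the reserved bits s
through e inclusive, so rsvd_bits(8, 8) == 0x100.  Shown for reference,
not part of this patch:

	static inline u64 rsvd_bits(int s, int e)
	{
		return ((1ULL << (e - s + 1)) - 1) << s;
	}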
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 69088a1..6f20763 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3614,7 +3614,7 @@ static void
 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			struct rsvd_bits_validate *rsvd_check,
 			int maxphyaddr, int level, bool nx, bool gbpages,
-			bool pse)
+			bool pse, bool amd)
 {
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
@@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
 	 * leaf entries) on AMD CPUs only.
 	 */
-	if (guest_cpuid_is_amd(vcpu))
+	if (amd)
 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

 	switch (level) {
@@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
 				context->nx, guest_cpuid_has_gbpages(vcpu),
-				is_pse(vcpu));
+				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }

 static void
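The asymmetry this hunk preserves, summarized (informational note, not
from the patch):

	/*
	 * guest_rsvd_check  - validates the guest's own page tables;
	 *                     the format follows the guest's CPUID
	 *                     vendor, so guest_cpuid_is_amd(vcpu)
	 *                     remains the right input here.
	 * shadow_zero_check - validates KVM's own SPTEs (NPT or EPT);
	 *                     the format is dictated by the host CPU,
	 *                     which is what the remaining hunks fix.
	 */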
@@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	/*
+	 * Passing "true" to the last argument is okay; it adds a check
+	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
+	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, context->nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+				true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

+static inline bool boot_cpu_is_amd(void)
+{
+	WARN_ON_ONCE(!tdp_enabled);
+	return shadow_x_mask == 0;
+}
+
 /*
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
@@ -3764,11 +3775,11 @@ static void
 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context)
 {
-	if (guest_cpuid_is_amd(vcpu))
+	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true);
+					cpu_has_gbpages, true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
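To make the effect of this last hunk concrete, an illustrative
before/after for the reported case, an Intel guest on an AMD host (a
sketch, not code from the patch):

	/*
	 * AMD host: NPT has no X bit, so shadow_x_mask == 0 after
	 * setup.  The guest's CPUID says GenuineIntel, so
	 * guest_cpuid_is_amd(vcpu) is false.
	 *
	 *   before:  guest_cpuid_is_amd(vcpu) -> false
	 *            => EPT-format reserved-bit checks applied to SPTEs
	 *               that the hardware actually walks as NPT
	 *   after:   boot_cpu_is_amd()        -> true
	 *            => legacy/NPT-format checks, matching the real
	 *               page table format of the host
	 */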