author		Avi Kivity <avi@redhat.com>	2010-04-28 13:40:38 (GMT)
committer	Avi Kivity <avi@redhat.com>	2010-05-19 08:36:31 (GMT)
commit		61d2ef2ce3e0161bedf5d2867f546a8df77fa9bc
tree		d1e3423bac53d2222b4c607e15b29c6e7f9b546a /arch/x86/kvm/vmx.c
parent		5dfa3d170e17cbf9e4816a5ba2f5913c31c03e93
download	linux-fsl-qoriq-61d2ef2ce3e0161bedf5d2867f546a8df77fa9bc.tar.xz
KVM: VMX: Add facility to atomically switch MSRs on guest entry/exit
Some guest MSR values cannot be used on the host (for example, EFER.NX=0), so we need to switch them atomically during guest entry or exit. Add a facility to program the VMX MSR autoload registers accordingly.

Signed-off-by: Avi Kivity <avi@redhat.com>
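For illustration, a minimal sketch of how a caller might use the new helpers, following the EFER.NX example from the message above; the variables and the call site are hypothetical and not part of this patch (vmx is assumed to be the vcpu's struct vcpu_vmx):

	/*
	 * Hypothetical caller: run the guest with EFER.NX clear while
	 * keeping the host's EFER intact across VM-entry/exit.
	 */
	u64 host_efer, guest_efer;

	rdmsrl(MSR_EFER, host_efer);		/* current host value */
	guest_efer = host_efer & ~EFER_NX;	/* guest sees NX disabled */

	if (guest_efer != host_efer)
		/* hardware loads guest_efer on entry, host_efer on exit */
		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
	else
		/* values agree; retire any stale autoload slot for EFER */
		clear_atomic_switch_msr(vmx, MSR_EFER);

Note that add_atomic_switch_msr() does not bounds-check against NR_AUTOLOAD_MSRS, so with the arrays sized at one entry a caller must not register a second MSR.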
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	49
1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2e87296..ae22dcf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -98,6 +98,8 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
+#define NR_AUTOLOAD_MSRS 1
+
 struct vmcs {
 	u32 revision_id;
 	u32 abort;
@@ -125,6 +127,11 @@ struct vcpu_vmx {
 	u64 msr_guest_kernel_gs_base;
 #endif
 	struct vmcs *vmcs;
+	struct msr_autoload {
+		unsigned nr;
+		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+	} msr_autoload;
 	struct {
 		int loaded;
 		u16 fs_sel, gs_sel, ldt_sel;
@@ -595,6 +602,46 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+	unsigned i;
+	struct msr_autoload *m = &vmx->msr_autoload;
+
+	for (i = 0; i < m->nr; ++i)
+		if (m->guest[i].index == msr)
+			break;
+
+	if (i == m->nr)
+		return;
+	--m->nr;
+	m->guest[i] = m->guest[m->nr];
+	m->host[i] = m->host[m->nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+}
+
+static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+				  u64 guest_val, u64 host_val)
+{
+	unsigned i;
+	struct msr_autoload *m = &vmx->msr_autoload;
+
+	for (i = 0; i < m->nr; ++i)
+		if (m->guest[i].index == msr)
+			break;
+
+	if (i == m->nr) {
+		++m->nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	}
+
+	m->guest[i].index = msr;
+	m->guest[i].value = guest_val;
+	m->host[i].index = msr;
+	m->host[i].value = host_val;
+}
+
 static void reload_tss(void)
 {
 	/*
@@ -2470,7 +2517,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
 	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
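For reference, the guest[] and host[] arrays whose physical addresses are programmed into VM_ENTRY_MSR_LOAD_ADDR and VM_EXIT_MSR_LOAD_ADDR above hold struct vmx_msr_entry records (declared in arch/x86/include/asm/vmx.h, not shown in this diff). The sketch below follows the 16-byte autoload-entry format documented in the Intel SDM; the comments and exact attribute are illustrative, so consult the tree at this commit for the precise definition:

	struct vmx_msr_entry {
		u32 index;	/* MSR number to load */
		u32 reserved;	/* must be zero */
		u64 value;	/* value written to the MSR */
	} __aligned(16);	/* the MSR-load area must be 16-byte aligned */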