author	Paolo Bonzini <pbonzini@redhat.com>	2015-05-18 13:11:46 (GMT)
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-06-05 15:26:35 (GMT)
commit	e4cd1da944ed9d2acd2e4ccabf61ec443735f6db (patch)
tree	da2091cadde842880044ebde0cbf97dbc6161cd3 /arch/x86
parent	f481b069e674378758c73761827e83ab05c46b52 (diff)
download	linux-e4cd1da944ed9d2acd2e4ccabf61ec443735f6db.tar.xz
KVM: x86: pass struct kvm_mmu_page to gfn_to_rmap
This is always available (with one exception in the auditing code), and with
the same auditing exception the level was coming from sp->role.level.

Later, the spte's role will also be used to look up the right memslots array.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
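A sketch of where that could go (not part of this patch; the
kvm_memslots_for_spte_role() helper is an assumed name for a future
role-keyed lookup): once gfn_to_rmap() has the whole kvm_mmu_page, it can
select the memslots array matching the spte's role instead of always using
the default one.

	static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					  struct kvm_mmu_page *sp)
	{
		struct kvm_memslots *slots;
		struct kvm_memory_slot *slot;

		/* Assumed follow-up helper: pick the memslots array for this role. */
		slots = kvm_memslots_for_spte_role(kvm, sp->role);
		slot = __gfn_to_memslot(slots, gfn);
		return __gfn_to_rmap(gfn, sp->role.level, slot);
	}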
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	10
-rw-r--r--	arch/x86/kvm/mmu_audit.c	8
2 files changed, 11 insertions, 7 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a65ce12..0d01cbb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1043,12 +1043,12 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 /*
  * Take gfn and return the reverse mapping to it.
  */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	return __gfn_to_rmap(gfn, level, slot);
+	return __gfn_to_rmap(gfn, sp->role.level, slot);
 }
 
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
@@ -1066,7 +1066,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	return pte_list_add(vcpu, spte, rmapp);
 }
 
@@ -1078,7 +1078,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 	sp = page_header(__pa(spte));
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, sp);
 	pte_list_remove(spte, rmapp);
 }
 
@@ -1612,7 +1612,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	unsigned long *rmapp;
 
 	sp = page_header(__pa(spte));
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 368d534..9d99f17 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -146,7 +146,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 		return;
 	}
 
-	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, rev_sp);
 	if (!*rmapp) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
@@ -191,11 +191,15 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	unsigned long *rmapp;
 	u64 *sptep;
 	struct rmap_iterator iter;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 
 	if (sp->role.direct || sp->unsync || sp->role.invalid)
 		return;
 
-	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);
+	slots = kvm_memslots(kvm);
+	slot = __gfn_to_memslot(slots, sp->gfn);
+	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
 
 	for_each_rmap_spte(rmapp, &iter, sptep)
 		if (is_writable_pte(*sptep))