From 547ffaed871af7db8713972eaf630802cac47cb1 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Wed, 24 Feb 2016 17:51:07 +0800
Subject: KVM: MMU: introduce kvm_mmu_gfn_{allow,disallow}_lpage

Abstract the common operations from account_shadowed() and
unaccount_shadowed(), then introduce kvm_mmu_gfn_disallow_lpage()
and kvm_mmu_gfn_allow_lpage().

These two functions will be used by page tracking in a later patch.

Reviewed-by: Paolo Bonzini
Signed-off-by: Xiao Guangrong
Signed-off-by: Paolo Bonzini

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 06dd4ab..566639d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -776,21 +776,39 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
+static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
+					    gfn_t gfn, int count)
+{
+	struct kvm_lpage_info *linfo;
+	int i;
+
+	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->disallow_lpage += count;
+		WARN_ON(linfo->disallow_lpage < 0);
+	}
+}
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	update_gfn_disallow_lpage_count(slot, gfn, 1);
+}
+
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	update_gfn_disallow_lpage_count(slot, gfn, -1);
+}
+
 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
-	int i;
 
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->disallow_lpage += 1;
-	}
+	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 	kvm->arch.indirect_shadow_pages++;
 }
 
@@ -798,18 +816,12 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
-	int i;
 
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->disallow_lpage -= 1;
-		WARN_ON(linfo->disallow_lpage < 0);
-	}
+	kvm_mmu_gfn_allow_lpage(slot, gfn);
 	kvm->arch.indirect_shadow_pages--;
 }
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 55ffb7b..de92bed 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -174,4 +174,7 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 #endif
--
cgit v0.10.2
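
Note on the design: disallow_lpage is a per-level reference count rather
than a flag, so independent users (the shadow MMU today, the page tracker
in a later patch) can each pin the same gfn against huge-page mapping and
release it without stepping on one another. Below is a minimal standalone
userspace sketch of that counting scheme, not kernel code: the struct, the
single-gfn simplification, and main() are illustrative stand-ins only.

/*
 * Standalone sketch (userspace C) modeling the disallow_lpage counting
 * scheme this patch factors out.  Constants mirror the kernel's values
 * at the time, but everything here is a simplified illustration.
 */
#include <assert.h>
#include <stdio.h>

#define PT_DIRECTORY_LEVEL	2	/* 2MB huge pages */
#define PT_MAX_HUGEPAGE_LEVEL	3	/* 1GB huge pages */
#define NR_LEVELS (PT_MAX_HUGEPAGE_LEVEL - PT_DIRECTORY_LEVEL + 1)

struct lpage_info {
	int disallow_lpage;	/* > 0: no huge page may cover this gfn */
};

/* one counter per huge-page level, for a single gfn for simplicity */
static struct lpage_info linfo[NR_LEVELS];

static void update_gfn_disallow_lpage_count(int count)
{
	int i;

	for (i = 0; i < NR_LEVELS; i++) {
		linfo[i].disallow_lpage += count;
		/* the kernel uses WARN_ON() instead of assert() */
		assert(linfo[i].disallow_lpage >= 0);
	}
}

static void gfn_disallow_lpage(void)
{
	update_gfn_disallow_lpage_count(1);
}

static void gfn_allow_lpage(void)
{
	update_gfn_disallow_lpage_count(-1);
}

int main(void)
{
	/* two independent users (shadow MMU, page tracking) pin the gfn */
	gfn_disallow_lpage();
	gfn_disallow_lpage();

	/* one user drops its reference; the gfn stays pinned */
	gfn_allow_lpage();
	printf("disallow_lpage = %d\n", linfo[0].disallow_lpage); /* 1 */
	return 0;
}

A gfn becomes eligible for huge-page mapping again only once every user
has dropped its reference, which is the behavior account_shadowed() and
unaccount_shadowed() now share through the new helpers.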