From 59573742388442aef7f74a30bf606a955b4cad0d Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Fri, 25 Apr 2014 23:13:34 +0300 Subject: KVM: PPC: e500: Fix tlbilx_all emulation for HTW tlbilx_all emulation requires HW TLB invalidation for entries added by HTW. Add inval_tlb_on_host() function to do this invalidation and move inval_ea_on_host() to a common place. Add dedicated functions for lpid and pid tlb invalidation refactoring the existing kvmppc_e500_tlbil_all() function. Signed-off-by: Mihai Caraman (cherry picked and merged from sdk1.5 commit f9efafeef721d45eb60cdfe45534a788ca55f0bd) Change-Id: I4231ac6ac60927256d1a99084812bf958ac94f71 Reviewed-on: http://git.am.freescale.net:8181/11562 Tested-by: Review Code-CDREVIEW Reviewed-by: Stuart Yoder Reviewed-by: Jose Rivera diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 43fd9bb..138e790 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -333,6 +333,22 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500); #ifdef CONFIG_KVM_BOOKE_HV +void inval_tlb_on_host(struct kvm_vcpu *vcpu, int type, int pid); + +void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid, int sas, + int sind); +#else +/* TLB is fully virtualized */ +static inline void inval_tlb_on_host(struct kvm_vcpu *vcpu, + int type, int pid) +{} + +static inline void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid, + int sas, int sind) +{} +#endif + +#ifdef CONFIG_KVM_BOOKE_HV #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe) #define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu) #define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS) diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index e07196d..4a027cb 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -287,6 +287,7 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, { struct kvm_book3e_206_tlb_entry 
*tlbe; int tid, esel; + int sind = get_cur_sind(&vcpu_e500->vcpu); /* invalidate all entries */ for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { @@ -297,6 +298,10 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } } + + /* Invalidate entries added by HTW */ + if (has_feature(&vcpu_e500->vcpu, VCPU_FTR_MMU_V2) && (!sind)) + inval_tlb_on_host(&vcpu_e500->vcpu, type, pid); } static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index d8ba8bd..e6e312b2 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -232,22 +232,6 @@ void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu) book3e_tlb_unlock(); local_irq_restore(flags); } - -void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, - int pid, int sas, int sind) -{ - unsigned long flags; - - local_irq_save(flags); - mtspr(SPRN_MAS6, (pid << MAS6_SPID_SHIFT) | - sas | (sind << MAS6_SIND_SHIFT)); - mtspr(SPRN_MAS5, MAS5_SGS | vcpu->arch.lpid); - asm volatile("tlbilx 3, 0, %[ea]\n" : : - [ea] "r" (ea)); - mtspr(SPRN_MAS5, 0); - isync(); - local_irq_restore(flags); -} #endif /* diff --git a/arch/powerpc/kvm/e500_mmu_host.h b/arch/powerpc/kvm/e500_mmu_host.h index d91a0ce..7624835 100644 --- a/arch/powerpc/kvm/e500_mmu_host.h +++ b/arch/powerpc/kvm/e500_mmu_host.h @@ -12,16 +12,6 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel); -#ifdef CONFIG_KVM_BOOKE_HV - void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, - int pid, int sas, int sind); -#else -/* TLB is fully virtualized */ -static inline void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, - int pid, int sas, int sind) -{} -#endif - int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500); void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); diff --git a/arch/powerpc/kvm/e500mc.c 
b/arch/powerpc/kvm/e500mc.c index 826bebd..bbf7cdd 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -90,16 +90,54 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, local_irq_restore(flags); } -void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) +void inval_ea_on_host(struct kvm_vcpu *vcpu, gva_t ea, int pid, int sas, + int sind) { unsigned long flags; local_irq_save(flags); - mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.arch.lpid); + mtspr(SPRN_MAS5, MAS5_SGS | vcpu->arch.lpid); + mtspr(SPRN_MAS6, (pid << MAS6_SPID_SHIFT) | + sas | (sind << MAS6_SIND_SHIFT)); + asm volatile("tlbilx 3, 0, %[ea]\n" : : + [ea] "r" (ea)); + local_irq_restore(flags); +} + +void kvmppc_e500_tlbil_pid(struct kvm_vcpu *vcpu, int pid) +{ + unsigned long flags; + + local_irq_save(flags); + mtspr(SPRN_MAS5, MAS5_SGS | vcpu->arch.lpid); + mtspr(SPRN_MAS6, pid << MAS6_SPID_SHIFT); + asm volatile("tlbilxpid"); + mtspr(SPRN_MAS5, 0); + local_irq_restore(flags); +} + +void kvmppc_e500_tlbil_lpid(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + + local_irq_save(flags); + mtspr(SPRN_MAS5, MAS5_SGS | vcpu->arch.lpid); asm volatile("tlbilxlpid"); mtspr(SPRN_MAS5, 0); local_irq_restore(flags); +} +void inval_tlb_on_host(struct kvm_vcpu *vcpu, int type, int pid) +{ + if (type == 0) + kvmppc_e500_tlbil_lpid(vcpu); + else + kvmppc_e500_tlbil_pid(vcpu, pid); +} + +void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + kvmppc_e500_tlbil_lpid(&vcpu_e500->vcpu); kvmppc_lrat_invalidate(&vcpu_e500->vcpu); } -- cgit v0.10.2