From 3bbb7ca5b85b10e407126047701bbb2f76023bfd Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Tue, 23 Apr 2013 22:33:41 +0300 Subject: KVM: PPC: e500mc: Workaround guest missing tlbwe conditional e6500 has threads but does not have TLB write conditional. Currently HW threads are exposed to guests as cores so locks are not an option. Work around this for guests by not allowing one core's threads to use the same LPID simultaneously. The VM's LPID allocation scheme associates one LPID per HW thread index. This limits the number of VMs to (number of LPIDs - 1)/ (threads per core), which for the T4240 processor means 31. Signed-off-by: Mihai Caraman Change-Id: Ib1ba743f0f81084decbb26ebcef00d49df814c6c Reviewed-on: http://git.am.freescale.net:8181/1750 Reviewed-by: Wood Scott-B07421 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1aaf900..43ac154 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -234,7 +234,11 @@ struct kvm_arch_memory_slot { }; struct kvm_arch { +#ifdef CONFIG_KVM_BOOKE_HV + unsigned int lpid[2]; +#else unsigned int lpid; +#endif #ifdef CONFIG_KVM_BOOK3S_64_HV unsigned long hpt_virt; struct revmap_entry *revmap; @@ -428,6 +432,7 @@ struct kvm_vcpu_arch { u32 eplc; u32 epsc; u32 oldpir; + u32 lpid; #endif #if defined(CONFIG_BOOKE) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 1c6a9d72..e9a9695 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -77,7 +77,8 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) * writing shadow tlb entry to host TLB */ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, - uint32_t mas0) + uint32_t mas0, + uint32_t *lpid) { unsigned long flags; @@ -88,6 +89,8 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, mtspr(SPRN_MAS3, 
(u32)stlbe->mas7_3); mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); #ifdef CONFIG_KVM_BOOKE_HV + /* populate mas8 with latest LPID */ + stlbe->mas8 = MAS8_TGS | *lpid; mtspr(SPRN_MAS8, stlbe->mas8); #endif asm volatile("isync; tlbwe" : : : "memory"); @@ -133,11 +136,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, if (tlbsel == 0) { mas0 = get_host_mas0(stlbe->mas2); - __write_host_tlbe(stlbe, mas0); + __write_host_tlbe(stlbe, mas0, &vcpu_e500->vcpu.arch.lpid); } else { __write_host_tlbe(stlbe, MAS0_TLBSEL(1) | - MAS0_ESEL(to_htlb1_esel(sesel))); + MAS0_ESEL(to_htlb1_esel(sesel)), + &vcpu_e500->vcpu.arch.lpid); } } @@ -317,9 +321,7 @@ static void kvmppc_e500_setup_stlbe( stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); -#ifdef CONFIG_KVM_BOOKE_HV - stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; -#endif + /* Set mas8 when executing tlbwe since LPID can change dynamically */ } static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 753cc99..5db5d94 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "booke.h" #include "e500.h" @@ -46,10 +47,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) return; } - - tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id; + preempt_disable(); + tag = PPC_DBELL_LPID(vcpu->arch.lpid) | vcpu->vcpu_id; mb(); ppc_msgsnd(dbell_type, 0, tag); + preempt_enable(); } /* gtlbe must not be mapped by more than one host tlb entry */ @@ -58,12 +60,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, { unsigned int tid, ts; gva_t eaddr; - u32 val, lpid; + u32 val; unsigned long flags; ts = get_tlb_ts(gtlbe); tid = get_tlb_tid(gtlbe); - lpid = vcpu_e500->vcpu.kvm->arch.lpid; /* We search the host TLB to invalidate its shadow TLB entry */ val = (tid << 16) | 
ts; @@ -72,7 +73,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, local_irq_save(flags); mtspr(SPRN_MAS6, val); - mtspr(SPRN_MAS5, MAS5_SGS | lpid); + mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.arch.lpid); asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); val = mfspr(SPRN_MAS1); @@ -93,7 +94,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) unsigned long flags; local_irq_save(flags); - mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid); + mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.arch.lpid); asm volatile("tlbilxlpid"); mtspr(SPRN_MAS5, 0); local_irq_restore(flags); @@ -113,10 +114,22 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int lpid_idx = 0; kvmppc_booke_vcpu_load(vcpu, cpu); - mtspr(SPRN_LPID, vcpu->kvm->arch.lpid); + /* Get current core's thread index */ + lpid_idx = mfspr(SPRN_PIR) % threads_per_core; + + vcpu->arch.lpid = vcpu->kvm->arch.lpid[lpid_idx]; + vcpu->arch.eplc = EPC_EGS | (vcpu->arch.lpid << EPC_ELPID_SHIFT); + vcpu->arch.epsc = vcpu->arch.eplc; + + if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) + printk(KERN_DEBUG "vcpu 0x%p loaded on PID %d, lpid %d\n", + vcpu, smp_processor_id(), (int)vcpu->arch.lpid); + + mtspr(SPRN_LPID, vcpu->arch.lpid); mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); mtspr(SPRN_GPIR, vcpu->vcpu_id); mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp); @@ -195,8 +208,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM; #endif vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP; - vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT); - vcpu->arch.epsc = vcpu->arch.eplc; vcpu->arch.pvr = mfspr(SPRN_PVR); vcpu_e500->svr = mfspr(SPRN_SVR); @@ -329,19 +340,30 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) int kvmppc_core_init_vm(struct kvm *kvm) { - int lpid; + int i, lpid; - lpid = 
kvmppc_alloc_lpid(); - if (lpid < 0) - return lpid; + if (threads_per_core > 2) + return -ENOMEM; + + /* Each VM allocates one LPID per HW thread index */ + for(i = 0; i < threads_per_core; i++) { + lpid = kvmppc_alloc_lpid(); + if (lpid < 0) + return lpid; + + kvm->arch.lpid[i] = lpid; + } - kvm->arch.lpid = lpid; return 0; } void kvmppc_core_destroy_vm(struct kvm *kvm) { - kvmppc_free_lpid(kvm->arch.lpid); + int i; + + for(i = 0; i < threads_per_core; i++) { + kvmppc_free_lpid(kvm->arch.lpid[i]); + } } static int __init kvmppc_e500mc_init(void) -- cgit v0.10.2