diff options
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/include/asm/mmu-book3e.h | 6 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kvm/booke.h | 2 | ||||
-rw-r--r-- | arch/powerpc/kvm/bookehv_interrupts.S | 46 | ||||
-rw-r--r-- | arch/powerpc/kvm/e500.c | 4 | ||||
-rw-r--r-- | arch/powerpc/kvm/e500mc.c | 66 |
6 files changed, 89 insertions, 41 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 99d43e0..32e470e 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -40,7 +40,10 @@ /* MAS registers bit definitions */ -#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) +#define MAS0_TLBSEL_MASK 0x30000000 +#define MAS0_TLBSEL_SHIFT 28 +#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK) +#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT) #define MAS0_ESEL_MASK 0x0FFF0000 #define MAS0_ESEL_SHIFT 16 #define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) @@ -58,6 +61,7 @@ #define MAS1_TSIZE_MASK 0x00000f80 #define MAS1_TSIZE_SHIFT 7 #define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) +#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT) #define MAS2_EPN (~0xFFFUL) #define MAS2_X0 0x00000040 diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 3aa212c..0aa7d24 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -997,6 +997,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); + /* + * The exception type can change at this point, such as if the TLB entry + * for the emulated instruction has been evicted. 
+ */ + kvmppc_prepare_for_emulation(vcpu, &exit_nr); + /* restart interrupts if they were meant for the host */ kvmppc_restart_interrupt(vcpu, exit_nr); diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index fe09482..59d1d4e 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -92,6 +92,8 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu); +void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr); + enum int_class { INT_CLASS_NONCRIT, INT_CLASS_CRIT, diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 1d15ce4..6a10ee7 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -120,29 +120,14 @@ 1: .if \flags & NEED_EMU - /* - * Entry code uses external PID support. lwepx faults are handled - * as instruction tlb miss interrupts - * - * FIXME: we don't currently handle guest TLB entries that are - * present but have the permissions set for execute-but-not-read. - * Disallowing these mappings could be an option on e500mc, but - * not on chips with an LRAT if it is used. 
- */ - - mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ PPC_STL r15, VCPU_GPR(R15)(r4) PPC_STL r16, VCPU_GPR(R16)(r4) PPC_STL r17, VCPU_GPR(R17)(r4) PPC_STL r18, VCPU_GPR(R18)(r4) PPC_STL r19, VCPU_GPR(R19)(r4) - mr r8, r3 PPC_STL r20, VCPU_GPR(R20)(r4) - rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS PPC_STL r21, VCPU_GPR(R21)(r4) - rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR PPC_STL r22, VCPU_GPR(R22)(r4) - rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID PPC_STL r23, VCPU_GPR(R23)(r4) PPC_STL r24, VCPU_GPR(R24)(r4) PPC_STL r25, VCPU_GPR(R25)(r4) @@ -152,34 +137,15 @@ PPC_STL r29, VCPU_GPR(R29)(r4) PPC_STL r30, VCPU_GPR(R30)(r4) PPC_STL r31, VCPU_GPR(R31)(r4) - mtspr SPRN_EPLC, r8 - - /* disable preemption, so we are sure we hit the fixup handler */ - CURRENT_THREAD_INFO(r8, r1) - li r7, 1 - stw r7, TI_PREEMPT(r8) - - isync /* - * In case the read goes wrong, we catch it and write an invalid value - * in LAST_INST instead. + * We don't use external PID support. lwepx faults would need to be + * handled by KVM and this implies additional code in DO_KVM (for + * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS] which + * is too intrusive for the host. Get last instruction in + * kvmppc_handle_exit(). 
*/ -1: lwepx r9, 0, r5 -2: -.section .fixup, "ax" -3: li r9, KVM_INST_FETCH_FAILED - li r14, BOOKE_INTERRUPT_ITLB_MISS - b 2b -.previous -.section __ex_table,"a" - PPC_LONG_ALIGN - PPC_LONG 1b,3b -.previous - - mtspr SPRN_EPLC, r3 - li r7, 0 - stw r7, TI_PREEMPT(r8) + li r9, KVM_INST_FETCH_FAILED stw r9, VCPU_LAST_INST(r4) .endif diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index ce6b73c..c82a89f 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -439,6 +439,10 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, return r; } +void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr) +{ +} + struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvmppc_vcpu_e500 *vcpu_e500; diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 86380be..aad8b04 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -288,6 +288,72 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, return r; } +void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr) +{ + gva_t geaddr; + hpa_t addr; + u64 mas7_mas3; + hva_t eaddr; + u32 mas1, mas3; + struct page *page; + unsigned int addr_space, psize_shift; + bool pr; + + if ((*exit_nr != BOOKE_INTERRUPT_DATA_STORAGE) && + (*exit_nr != BOOKE_INTERRUPT_DTLB_MISS) && + (*exit_nr != BOOKE_INTERRUPT_HV_PRIV)) + return; + + /* Search guest translation to find the real address */ + geaddr = vcpu->arch.pc; + addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; + mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); + mtspr(SPRN_MAS5, MAS5_SGS | vcpu->arch.lpid); + isync(); + asm volatile("tlbsx 0, %[geaddr]\n" : : [geaddr] "r" (geaddr)); + mtspr(SPRN_MAS5, 0); + mtspr(SPRN_MAS8, 0); + + mas1 = mfspr(SPRN_MAS1); + if (!(mas1 & MAS1_VALID)) { + /* + * There is no translation for the emulated instruction. + * Simulate an instruction TLB miss. 
This should force the host + * or ultimately the guest to add the translation and then + * reexecute the instruction. + */ + *exit_nr = BOOKE_INTERRUPT_ITLB_MISS; + return; + } + + /* + * TODO: check permissions and return a DSI if execute permission + * is missing + */ + mas3 = mfspr(SPRN_MAS3); + pr = vcpu->arch.shared->msr & MSR_PR; + if ((pr && (!(mas3 & MAS3_UX))) || ((!pr) && (!(mas3 & MAS3_SX)))) + WARN_ON_ONCE(1); + + /* Get page size */ + if (MAS0_GET_TLBSEL(mfspr(SPRN_MAS0)) == 0) + psize_shift = PAGE_SHIFT; + else + psize_shift = MAS1_GET_TSIZE(mas1) + 10; + + mas7_mas3 = (((u64) mfspr(SPRN_MAS7)) << 32) | + mfspr(SPRN_MAS3); + addr = (mas7_mas3 & (~0ULL << psize_shift)) | + (geaddr & ((1ULL << psize_shift) - 1ULL)); + + /* Map a page and get guest's instruction */ + page = pfn_to_page(addr >> PAGE_SHIFT); + eaddr = (unsigned long)kmap_atomic(page); + eaddr |= addr & ~PAGE_MASK; + vcpu->arch.last_inst = *(u32 *)eaddr; + kunmap_atomic((u32 *)eaddr); +} + struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvmppc_vcpu_e500 *vcpu_e500; |