Diffstat (limited to 'arch/powerpc/kvm/e500mc.c')
-rw-r--r--    arch/powerpc/kvm/e500mc.c    121
1 file changed, 106 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19c8379..10f8b20 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -22,6 +22,7 @@
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
+#include <asm/cputhreads.h>
#include "booke.h"
#include "e500.h"
@@ -46,10 +47,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
return;
}
-
- tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+ preempt_disable();
+ tag = PPC_DBELL_LPID(vcpu->arch.lpid) | vcpu->vcpu_id;
mb();
ppc_msgsnd(dbell_type, 0, tag);
+ preempt_enable();
}
/* gtlbe must not be mapped by more than one host tlb entry */
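Taken together, the first hunk leaves the doorbell path of kvmppc_set_pending_interrupt() looking roughly like the fragment below (reassembled from the hunk above with comments added; it is a reading aid, not compilable on its own). The key point is that vcpu->arch.lpid is now per hardware thread, so the tag computation and the msgsnd are kept on one thread by the preempt_disable()/preempt_enable() pair.

preempt_disable();                      /* lpid is per hardware thread; stay on this one */
tag = PPC_DBELL_LPID(vcpu->arch.lpid) | vcpu->vcpu_id;
mb();                                   /* make prior state visible before the doorbell */
ppc_msgsnd(dbell_type, 0, tag);
preempt_enable();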
@@ -58,12 +60,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
{
unsigned int tid, ts;
gva_t eaddr;
- u32 val, lpid;
+ u32 val;
unsigned long flags;
ts = get_tlb_ts(gtlbe);
tid = get_tlb_tid(gtlbe);
- lpid = vcpu_e500->vcpu.kvm->arch.lpid;
/* We search the host TLB to invalidate its shadow TLB entry */
val = (tid << 16) | ts;
@@ -72,7 +73,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
local_irq_save(flags);
mtspr(SPRN_MAS6, val);
- mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+ mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.arch.lpid);
asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
val = mfspr(SPRN_MAS1);
@@ -93,7 +94,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
unsigned long flags;
local_irq_save(flags);
- mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+ mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.arch.lpid);
asm volatile("tlbilxlpid");
mtspr(SPRN_MAS5, 0);
local_irq_restore(flags);
@@ -113,10 +114,22 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int lpid_idx = 0;
kvmppc_booke_vcpu_load(vcpu, cpu);
- mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+ /* Get current core's thread index */
+ lpid_idx = mfspr(SPRN_PIR) % threads_per_core;
+
+ vcpu->arch.lpid = vcpu->kvm->arch.lpid[lpid_idx];
+ vcpu->arch.eplc = EPC_EGS | (vcpu->arch.lpid << EPC_ELPID_SHIFT);
+ vcpu->arch.epsc = vcpu->arch.eplc;
+
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ pr_debug("vcpu 0x%p loaded on PID %d, lpid %d\n",
+ vcpu, smp_processor_id(), (int)vcpu->arch.lpid);
+
+ mtspr(SPRN_LPID, vcpu->arch.lpid);
mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
mtspr(SPRN_GPIR, vcpu->vcpu_id);
mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
@@ -145,6 +158,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
kvmppc_load_guest_fp(vcpu);
+ kvmppc_load_guest_altivec(vcpu);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
@@ -177,6 +191,8 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
+ else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
+ r = 0;
else
r = -ENOTSUPP;
@@ -193,8 +209,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
- vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
- vcpu->arch.epsc = vcpu->arch.eplc;
vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu_e500->svr = mfspr(SPRN_SVR);
@@ -274,6 +288,72 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
return r;
}
+void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr)
+{
+ gva_t geaddr;
+ hpa_t addr;
+ u64 mas7_mas3;
+ hva_t eaddr;
+ u32 mas1, mas3;
+ struct page *page;
+ unsigned int addr_space, psize_shift;
+ bool pr;
+
+ if ((*exit_nr != BOOKE_INTERRUPT_DATA_STORAGE) &&
+ (*exit_nr != BOOKE_INTERRUPT_DTLB_MISS) &&
+ (*exit_nr != BOOKE_INTERRUPT_HV_PRIV))
+ return;
+
+ /* Search guest translation to find the real address */
+ geaddr = vcpu->arch.pc;
+ addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;
+ mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
+ mtspr(SPRN_MAS5, MAS5_SGS | vcpu->arch.lpid);
+ isync();
+ asm volatile("tlbsx 0, %[geaddr]\n" : : [geaddr] "r" (geaddr));
+ mtspr(SPRN_MAS5, 0);
+ mtspr(SPRN_MAS8, 0);
+
+ mas1 = mfspr(SPRN_MAS1);
+ if (!(mas1 & MAS1_VALID)) {
+ /*
+ * There is no translation for the emulated instruction.
+ * Simulate an instruction TLB miss. This should force the host
+ * or ultimately the guest to add the translation and then
+ * reexecute the instruction.
+ */
+ *exit_nr = BOOKE_INTERRUPT_ITLB_MISS;
+ return;
+ }
+
+ /*
+ * TODO: check permissions and return a DSI if execute permission
+ * is missing
+ */
+ mas3 = mfspr(SPRN_MAS3);
+ pr = vcpu->arch.shared->msr & MSR_PR;
+ if ((pr && (!(mas3 & MAS3_UX))) || ((!pr) && (!(mas3 & MAS3_SX))))
+ WARN_ON_ONCE(1);
+
+ /* Get page size */
+ if (MAS0_GET_TLBSEL(mfspr(SPRN_MAS0)) == 0)
+ psize_shift = PAGE_SHIFT;
+ else
+ psize_shift = MAS1_GET_TSIZE(mas1) + 10;
+
+ mas7_mas3 = (((u64) mfspr(SPRN_MAS7)) << 32) |
+ mfspr(SPRN_MAS3);
+ addr = (mas7_mas3 & (~0ULL << psize_shift)) |
+ (geaddr & ((1ULL << psize_shift) - 1ULL));
+
+ /* Map a page and get guest's instruction */
+ page = pfn_to_page(addr >> PAGE_SHIFT);
+ eaddr = (unsigned long)kmap_atomic(page);
+ eaddr |= addr & ~PAGE_MASK;
+ vcpu->arch.last_inst = *(u32 *)eaddr;
+ kunmap_atomic((u32 *)eaddr);
+}
+
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
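In kvmppc_prepare_for_emulation() above, the host-physical address of the guest instruction is rebuilt from the matched TLB entry: MAS7||MAS3 supply the real page bits, MAS1 (or PAGE_SHIFT for TLB0 entries) supplies the page size, and the low bits come from the guest effective address. Below is a standalone sketch of just that arithmetic, with made-up values; compose_real_addr() is an illustration, not a kernel helper.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: combine the real-page bits (standing in for MAS7||MAS3)
 * with the in-page offset of the guest effective address, the same way the
 * hunk above computes addr from mas7_mas3, geaddr and psize_shift. */
static uint64_t compose_real_addr(uint64_t rpn_bits, uint64_t eaddr,
                                  unsigned int psize_shift)
{
        return (rpn_bits & (~0ULL << psize_shift)) |    /* page frame bits */
               (eaddr & ((1ULL << psize_shift) - 1));   /* in-page offset  */
}

int main(void)
{
        /* 4 KiB page (psize_shift = 12): frame 0x123456000, guest pc 0x77777abc */
        printf("%llx\n", (unsigned long long)
               compose_real_addr(0x123456000ULL, 0x77777abcULL, 12));
        /* prints 123456abc */
        return 0;
}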
@@ -327,19 +407,30 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
int kvmppc_core_init_vm(struct kvm *kvm)
{
- int lpid;
+ int i, lpid;
- lpid = kvmppc_alloc_lpid();
- if (lpid < 0)
- return lpid;
+ if (threads_per_core > 2)
+ return -ENOMEM;
+
+ /* Each VM allocates one LPID per HW thread index */
+ for (i = 0; i < threads_per_core; i++) {
+ lpid = kvmppc_alloc_lpid();
+ if (lpid < 0)
+ return lpid;
+
+ kvm->arch.lpid[i] = lpid;
+ }
- kvm->arch.lpid = lpid;
return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
- kvmppc_free_lpid(kvm->arch.lpid);
+ int i;
+
+ for (i = 0; i < threads_per_core; i++) {
+ kvmppc_free_lpid(kvm->arch.lpid[i]);
+ }
}
static int __init kvmppc_e500mc_init(void)
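The kvmppc_core_init_vm()/kvmppc_core_destroy_vm() hunks pair with the vcpu_load change earlier in the diff: each VM now reserves one LPID per hardware thread index, and at load time a vcpu selects the LPID matching the thread it landed on (PIR % threads_per_core). The following is a small userspace model of that mapping; alloc_lpid(), toy_vm and lpid_for_thread() are stand-ins for illustration, not kvmppc_alloc_lpid() or the real kvm_arch layout.

#include <stdio.h>

#define MAX_THREADS_PER_CORE 2          /* matches the threads_per_core > 2 check above */

static int next_lpid = 1;               /* toy allocator, not kvmppc_alloc_lpid() */
static int alloc_lpid(void) { return next_lpid++; }

struct toy_vm { int lpid[MAX_THREADS_PER_CORE]; };

/* Mirrors kvmppc_core_init_vm(): one LPID per hardware thread index. */
static void toy_vm_init(struct toy_vm *vm, int threads_per_core)
{
        int i;

        for (i = 0; i < threads_per_core; i++)
                vm->lpid[i] = alloc_lpid();
}

/* Mirrors the vcpu_load hunk: lpid_idx = PIR % threads_per_core. */
static int lpid_for_thread(struct toy_vm *vm, int pir, int threads_per_core)
{
        return vm->lpid[pir % threads_per_core];
}

int main(void)
{
        struct toy_vm vm;

        toy_vm_init(&vm, 2);
        /* Two vcpus of the same VM on sibling threads (PIR 8 and 9 of one
         * core) end up with different LPIDs in the shared TLB. */
        printf("thread 0 -> lpid %d\n", lpid_for_thread(&vm, 8, 2));
        printf("thread 1 -> lpid %d\n", lpid_for_thread(&vm, 9, 2));
        return 0;
}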