summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorMihai Caraman <mihai.caraman@freescale.com>2014-04-25 19:28:16 (GMT)
committerJose Rivera <German.Rivera@freescale.com>2014-04-30 02:04:48 (GMT)
commitdf633166bd6defede969fc0ef3bb1e5fdd184077 (patch)
tree26b90b555036a0260db0ebe941ff8847667f32b7 /arch
parent5f3005e9f6cbd4d01597407aa8a7aa5a2f5a381c (diff)
downloadlinux-fsl-qoriq-df633166bd6defede969fc0ef3bb1e5fdd184077.tar.xz
KVM: PPC: Book3E: Handle LRAT error exception
Handle LRAT error exception with support for lrat mapping/invalidation and prepare for emulation. Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com> (cherry picked and merged from sdk1.5 commit fd2bba8b66657eb452c152d6cf0b1cd4320df204) Change-Id: Ifa9a9ce3a9aacab53f6184b2aff00edde1acf47d Reviewed-on: http://git.am.freescale.net:8181/11560 Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com> Reviewed-by: Stuart Yoder <stuart.yoder@freescale.com> Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h2
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h3
-rw-r--r--arch/powerpc/include/asm/reg_booke.h13
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kvm/booke.c41
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c128
-rw-r--r--arch/powerpc/kvm/e500mc.c6
9 files changed, 202 insertions, 2 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 79ea6b1..b569c68 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -428,6 +428,7 @@ struct kvm_vcpu_arch {
u32 epsc;
u32 oldpir;
u32 lpid;
+ u64 fault_lper;
#endif
#if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index cefd3ed..e8b3982 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -85,6 +85,8 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu);
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
unsigned int id);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 5b57d1c..3f079ab 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,6 +40,8 @@
/* MAS registers bit definitions */
+#define MAS0_ATSEL 0x80000000
+#define MAS0_ATSEL_SHIFT 31
#define MAS0_TLBSEL_MASK 0x30000000
#define MAS0_TLBSEL_SHIFT 28
#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
@@ -54,6 +56,7 @@
#define MAS0_WQ_CLR_RSRV 0x00002000
#define MAS1_VALID 0x80000000
+#define MAS1_VALID_SHIFT 31
#define MAS1_IPROT 0x40000000
#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000)
#define MAS1_IND 0x00002000
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 0e37079..189d9db 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -52,6 +52,8 @@
/* Special Purpose Registers (SPRNs)*/
#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */
+#define SPRN_LPER 0x038 /* Logical Page Exception Register */
+#define SPRN_LPERU 0x039 /* Logical Page Exception Register Upper */
#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */
#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */
#define SPRN_SPRG3R 0x103 /* Special Purpose Register General 3 Read */
@@ -367,6 +369,9 @@
#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
#define ESR_BO 0x00020000 /* Byte Ordering */
+#define ESR_DATA 0x00000400 /* Page Table Data Access */
+#define ESR_TLBI 0x00000200 /* Page Table TLB Ineligible */
+#define ESR_PT 0x00000100 /* Page Table Translation */
#define ESR_SPV 0x00000080 /* Signal Processing operation */
/* Bit definitions related to the DBCR0. */
@@ -664,6 +669,14 @@
#define EPC_EPID 0x00003fff
#define EPC_EPID_SHIFT 0
+/* Bit definitions for LPER */
+#define LPER_ALPN 0x000FFFFFFFFFF000ULL
+#define LPER_ALPN_SHIFT 12
+#define LPER_WIMGE 0x00000F80
+#define LPER_WIMGE_SHIFT 7
+#define LPER_LPS 0x0000000F
+#define LPER_LPS_SHIFT 0
+
/*
* The IBM-403 is an even more odd special case, as it is much
 * older than the IBM-405 series. We put these down here in case someone
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d6bb592..ae8f94e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -657,6 +657,7 @@ int main(void)
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+ DEFINE(VCPU_FAULT_LPER, offsetof(struct kvm_vcpu, arch.fault_lper));
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 12c6706..5db43a8 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1249,6 +1249,47 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
+#ifdef CONFIG_KVM_BOOKE_HV
+ case BOOKE_INTERRUPT_LRAT_ERROR:
+ {
+ gfn_t gfn;
+
+ /*
+ * Guest TLB management instructions (EPCR.DGTMI == 0) are not
+ * supported for now
+ */
+ if (!(vcpu->arch.fault_esr & ESR_PT)) {
+ WARN(1, "%s: Guest TLB management instructions\n"
+ "not supported!\n", __func__);
+ break;
+ }
+
+ gfn = (vcpu->arch.fault_lper & LPER_ALPN) >> LPER_ALPN_SHIFT;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ kvmppc_lrat_map(vcpu, gfn);
+ r = RESUME_GUEST;
+ } else if (vcpu->arch.fault_esr & ESR_DATA) {
+ vcpu->arch.paddr_accessed = (gfn << PAGE_SHIFT)
+ | (vcpu->arch.fault_dear & (PAGE_SIZE - 1));
+ vcpu->arch.vaddr_accessed =
+ vcpu->arch.fault_dear;
+
+ r = kvmppc_emulate_mmio(run, vcpu);
+ kvmppc_account_exit(vcpu, MMIO_EXITS);
+ } else {
+ kvmppc_booke_queue_irqprio(vcpu,
+ BOOKE_IRQPRIO_MACHINE_CHECK);
+ r = RESUME_GUEST;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ break;
+ }
+#endif
+
case BOOKE_INTERRUPT_DEBUG: {
r = kvmppc_handle_debug(run, vcpu);
if (r == RESUME_HOST)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index a6a893f..891b32f 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -62,6 +62,7 @@
#define NEED_EMU 0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR 0x00000002 /* save faulting DEAR */
#define NEED_ESR 0x00000004 /* save faulting ESR */
+#define NEED_LPER 0x00000008 /* save faulting LPER */
/*
* On entry:
@@ -159,6 +160,12 @@
PPC_STL r9, VCPU_FAULT_DEAR(r4)
.endif
+ /* Only supported on 64-bit cores for now */
+ .if \flags & NEED_LPER
+ mfspr r7, SPRN_LPER
+ std r7, VCPU_FAULT_LPER(r4)
+ .endif
+
b kvmppc_resume_host
.endm
@@ -281,7 +288,7 @@ kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
- SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR | NEED_LPER)
#else
/*
* For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index e9386c7..af673d9 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
+#include <linux/list.h>
#include "e500.h"
#include "trace.h"
@@ -106,6 +107,133 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
stlbe->mas2, stlbe->mas7_3);
}
+#ifdef CONFIG_KVM_BOOKE_HV
+#ifdef CONFIG_64BIT
+static inline int lrat_next(void)
+{
+ int this, next;
+
+ this = local_paca->tlb_per_core.lrat_next;
+ next = (this + 1) % local_paca->tlb_per_core.lrat_max;
+ local_paca->tlb_per_core.lrat_next = next;
+
+ return this;
+}
+
+static inline int lrat_size(void)
+{
+ return local_paca->tlb_per_core.lrat_max;
+}
+#else
+/* LRAT is only supported in 64-bit kernel for now */
+static inline int lrat_next(void)
+{
+ BUG();
+}
+
+static inline int lrat_size(void)
+{
+ return 0;
+}
+#endif
+
+void write_host_lrate(int tsize, gfn_t gfn, unsigned long pfn, uint32_t *lpid,
+ int valid, int lrat_entry)
+{
+ struct kvm_book3e_206_tlb_entry stlbe;
+ int esel = lrat_entry;
+ unsigned long flags;
+
+ stlbe.mas1 = (valid ? MAS1_VALID : 0) | MAS1_TSIZE(tsize);
+ stlbe.mas2 = ((u64)gfn << PAGE_SHIFT);
+ stlbe.mas7_3 = ((u64)pfn << PAGE_SHIFT);
+ stlbe.mas8 = 0;
+
+ local_irq_save(flags);
+ book3e_tlb_lock();
+
+ if (esel == -1)
+ esel = lrat_next();
+ __write_host_tlbe(&stlbe, MAS0_ATSEL | MAS0_ESEL(esel), lpid);
+
+ book3e_tlb_unlock();
+ local_irq_restore(flags);
+}
+
+void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot;
+ unsigned long pfn;
+ unsigned long hva;
+ struct vm_area_struct *vma;
+ unsigned long psize;
+ int tsize;
+ unsigned long tsize_pages;
+ uint32_t *lpid = &vcpu->arch.lpid;
+
+ slot = gfn_to_memslot(vcpu->kvm, gfn);
+ if (!slot) {
+ pr_err_ratelimited("%s: couldn't find memslot for gfn %lx!\n",
+ __func__, (long)gfn);
+ return;
+ }
+
+ hva = slot->userspace_addr;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, hva);
+ if (vma && (hva >= vma->vm_start)) {
+ psize = vma_kernel_pagesize(vma);
+ } else {
+ pr_err_ratelimited("%s: couldn't find virtual memory address\n"
+ "for gfn %lx!\n", __func__, (long)gfn);
+ return;
+ }
+ up_read(&current->mm->mmap_sem);
+
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (is_error_noslot_pfn(pfn)) {
+ pr_err_ratelimited("%s: couldn't get real page for gfn %lx!\n",
+ __func__, (long)gfn);
+ return;
+ }
+
+ tsize = __ilog2(psize) - 10;
+ tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+ gfn &= ~(tsize_pages - 1);
+ pfn &= ~(tsize_pages - 1);
+
+ write_host_lrate(tsize, gfn, pfn, lpid, 1, -1);
+ kvm_release_pfn_clean(pfn);
+}
+
+void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu)
+{
+ uint32_t mas0, mas1 = 0;
+ int esel;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ book3e_tlb_lock();
+
+ /* LRAT does not have a dedicated instruction for invalidation */
+ for (esel = 0; esel < lrat_size(); esel++) {
+ mas0 = MAS0_ATSEL | MAS0_ESEL(esel);
+ mtspr(SPRN_MAS0, mas0);
+ asm volatile("isync; tlbre" : : : "memory");
+ mas1 = mfspr(SPRN_MAS1) & ~MAS1_VALID;
+ mtspr(SPRN_MAS1, mas1);
+ asm volatile("isync; tlbwe" : : : "memory");
+ }
+ /* Must clear mas8 for other host tlbwe's */
+ mtspr(SPRN_MAS8, 0);
+ isync();
+
+ book3e_tlb_unlock();
+ local_irq_restore(flags);
+}
+#endif
+
/*
* Acquire a mas0 with victim hint, as if we just took a TLB miss.
*
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 459dc87..326314e 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -98,6 +98,8 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
asm volatile("tlbilxlpid");
mtspr(SPRN_MAS5, 0);
local_irq_restore(flags);
+
+ kvmppc_lrat_invalidate(&vcpu_e500->vcpu);
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
@@ -301,7 +303,9 @@ void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr)
if ((*exit_nr != BOOKE_INTERRUPT_DATA_STORAGE) &&
(*exit_nr != BOOKE_INTERRUPT_DTLB_MISS) &&
- (*exit_nr != BOOKE_INTERRUPT_HV_PRIV))
+ (*exit_nr != BOOKE_INTERRUPT_HV_PRIV) &&
+ ((*exit_nr != BOOKE_INTERRUPT_LRAT_ERROR) ||
+ (!(vcpu->arch.fault_esr & ESR_DATA))))
return;
/* Search guest translation to find the real address */