author      Scott Wood <scottwood@freescale.com>    2014-04-08 01:00:49 (GMT)
committer   Scott Wood <scottwood@freescale.com>    2014-04-08 19:58:35 (GMT)
commit      47d2261a3fa71cde24263559a4219a25e50d8c89 (patch)
tree        28774d5b330ccf1b777a3af222d8356918328013 /arch/powerpc/kvm
parent      fb7f27080adc65cd5f341bdf56a1d0c14f316c1b (diff)
parent      5fb9d37f27351e42f002e372074249f92cbdf815 (diff)
download    linux-fsl-qoriq-47d2261a3fa71cde24263559a4219a25e50d8c89.tar.xz
Merge branch 'merge' into sdk-v1.6.x
This reverts v3.13-rc3+ (78fd82238d0e5716) to v3.12, except for commits
which I noticed which appear relevant to the SDK.

Signed-off-by: Scott Wood <scottwood@freescale.com>

Conflicts:
	arch/powerpc/include/asm/kvm_host.h
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
	arch/powerpc/kvm/book3s_interrupts.S
	arch/powerpc/kvm/e500.c
	arch/powerpc/kvm/e500mc.c
	arch/powerpc/sysdev/fsl_soc.h
	drivers/Kconfig
	drivers/cpufreq/ppc-corenet-cpufreq.c
	drivers/dma/fsldma.c
	drivers/dma/s3c24xx-dma.c
	drivers/misc/Makefile
	drivers/mmc/host/sdhci-of-esdhc.c
	drivers/mtd/devices/m25p80.c
	drivers/net/ethernet/freescale/gianfar.h
	drivers/platform/Kconfig
	drivers/platform/Makefile
	drivers/spi/spi-fsl-espi.c
	include/crypto/algapi.h
	include/linux/netdev_features.h
	include/linux/skbuff.h
	include/net/ip.h
	net/core/ethtool.c
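For orientation, here is a minimal sketch of the kind of git workflow that produces a merge commit with the two parents and conflict list shown above. The exact commands are not recorded in this log, so treat this as an assumption; the branch names are taken from the subject line:

    # on the SDK branch, currently at parent fb7f27080adc...
    git checkout sdk-v1.6.x
    # merge the revert branch (parent 5fb9d37f2735...); the files listed
    # under "Conflicts:" fail to auto-merge and must be resolved by hand
    git merge merge
    # ...edit each conflicted file, then stage and record the resolution;
    # -s appends the Signed-off-by: trailer seen in the message
    git add -u
    git commit -s

The 'merge' branch itself would have been prepared by reverting the v3.12..v3.13-rc3+ range while keeping the SDK-relevant commits; the precise mechanics of that step are not visible from this page.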
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/44x.c                  |  58
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c          |   8
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c              |   2
-rw-r--r--  arch/powerpc/kvm/Kconfig                |  29
-rw-r--r--  arch/powerpc/kvm/Makefile               |  29
-rw-r--r--  arch/powerpc/kvm/book3s.c               | 257
-rw-r--r--  arch/powerpc/kvm/book3s.h               |  34
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c        |  73
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c   |  16
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c        | 181
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c   | 106
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c     |  24
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c     |   1
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c       |  18
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c       |   5
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c            | 389
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S |   3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 618
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S    |  32
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c      |  66
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c            | 534
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c       |  52
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S    |  32
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c          |   1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S       |   4
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c          |   7
-rw-r--r--  arch/powerpc/kvm/booke.c                |  80
-rw-r--r--  arch/powerpc/kvm/booke.h                |  24
-rw-r--r--  arch/powerpc/kvm/e500.c                 |  60
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c         |   8
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c             |   4
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c        |   3
-rw-r--r--  arch/powerpc/kvm/e500mc.c               |  59
-rw-r--r--  arch/powerpc/kvm/emulate.c              |  12
-rw-r--r--  arch/powerpc/kvm/powerpc.c              | 171
-rw-r--r--  arch/powerpc/kvm/trace.h                | 429
-rw-r--r--  arch/powerpc/kvm/trace_booke.h          | 177
-rw-r--r--  arch/powerpc/kvm/trace_pr.h             | 297
38 files changed, 1296 insertions(+), 2607 deletions(-)
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 93221e8..2f5c6b6 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
#include "44x_tlb.h"
#include "booke.h"
-static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);
kvmppc_44x_tlb_load(vcpu);
}
-static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_44x_tlb_put(vcpu);
kvmppc_booke_vcpu_put(vcpu);
@@ -114,32 +114,29 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
return 0;
}
-static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- return kvmppc_get_sregs_ivor(vcpu, sregs);
+ kvmppc_get_sregs_ivor(vcpu, sregs);
}
-static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
-static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
return -EINVAL;
}
-static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
return -EINVAL;
}
-static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
- unsigned int id)
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_44x *vcpu_44x;
struct kvm_vcpu *vcpu;
@@ -170,7 +167,7 @@ out:
return ERR_PTR(err);
}
-static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
@@ -179,53 +176,28 @@ static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
-static int kvmppc_core_init_vm_44x(struct kvm *kvm)
+int kvmppc_core_init_vm(struct kvm *kvm)
{
return 0;
}
-static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
+void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}
-static struct kvmppc_ops kvm_ops_44x = {
- .get_sregs = kvmppc_core_get_sregs_44x,
- .set_sregs = kvmppc_core_set_sregs_44x,
- .get_one_reg = kvmppc_get_one_reg_44x,
- .set_one_reg = kvmppc_set_one_reg_44x,
- .vcpu_load = kvmppc_core_vcpu_load_44x,
- .vcpu_put = kvmppc_core_vcpu_put_44x,
- .vcpu_create = kvmppc_core_vcpu_create_44x,
- .vcpu_free = kvmppc_core_vcpu_free_44x,
- .mmu_destroy = kvmppc_mmu_destroy_44x,
- .init_vm = kvmppc_core_init_vm_44x,
- .destroy_vm = kvmppc_core_destroy_vm_44x,
- .emulate_op = kvmppc_core_emulate_op_44x,
- .emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
- .emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
-};
-
static int __init kvmppc_44x_init(void)
{
int r;
r = kvmppc_booke_init();
if (r)
- goto err_out;
-
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
- if (r)
- goto err_out;
- kvm_ops_44x.owner = THIS_MODULE;
- kvmppc_pr_ops = &kvm_ops_44x;
+ return r;
-err_out:
- return r;
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
}
static void __exit kvmppc_44x_exit(void)
{
- kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 92c9ab4..35ec0a8 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
return EMULATE_DONE;
}
-int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val
return emulated;
}
-int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 0deef10..ed03854 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
trace_kvm_stlb_inval(stlb_index);
}
-void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 141b202..ffaef2c 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -6,7 +6,6 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
- depends on !CPU_LITTLE_ENDIAN
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
@@ -35,20 +34,17 @@ config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
-config KVM_BOOK3S_PR_POSSIBLE
+config KVM_BOOK3S_PR
bool
select KVM_MMIO
select MMU_NOTIFIER
-config KVM_BOOK3S_HV_POSSIBLE
- bool
-
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
select KVM
select KVM_BOOK3S_32_HANDLER
- select KVM_BOOK3S_PR_POSSIBLE
+ select KVM_BOOK3S_PR
---help---
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
@@ -63,7 +59,6 @@ config KVM_BOOK3S_64
depends on PPC_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
- select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -74,9 +69,8 @@ config KVM_BOOK3S_64
If unsure, say N.
config KVM_BOOK3S_64_HV
- tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
- select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
---help---
@@ -95,20 +89,9 @@ config KVM_BOOK3S_64_HV
If unsure, say N.
config KVM_BOOK3S_64_PR
- tristate "KVM support without using hypervisor mode in host"
- depends on KVM_BOOK3S_64
- select KVM_BOOK3S_PR_POSSIBLE
- ---help---
- Support running guest kernels in virtual machines on processors
- without using hypervisor mode in the host, by running the
- guest in user mode (problem state) and emulating all
- privileged instructions and registers.
-
- This is not as fast as using hypervisor mode, but works on
- machines where hypervisor mode is not available or not usable,
- and can emulate processors that are different from the host
- processor, including emulating 32-bit processors on a 64-bit
- host.
+ def_bool y
+ depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
+ select KVM_BOOK3S_PR
config KVM_BOOKE_HV
bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ce569b6..6646c95 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,51 +53,41 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
- book3s_64_vio_hv.o
-
-kvm-pr-y := \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+ $(KVM)/coalesced_mmio.o \
fpu.o \
book3s_paired_singles.o \
book3s_pr.o \
book3s_pr_papr.o \
+ book3s_64_vio_hv.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
-
-ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
-kvm-book3s_64-module-objs := \
- $(KVM)/coalesced_mmio.o
-
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
book3s_rmhandlers.o
-endif
-kvm-hv-y += \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o
-
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
book3s_hv_rm_xics.o
-
-ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
+ book3s_64_vio_hv.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_cma.o \
$(kvm-book3s_64-builtin-xics-objs-y)
-endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
-kvm-book3s_64-module-objs += \
+kvm-book3s_64-module-objs := \
$(KVM)/kvm_main.o \
$(KVM)/eventfd.o \
powerpc.o \
@@ -133,7 +123,4 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
-obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
-obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
-
obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8912608..700df6f 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,7 +34,6 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
-#include "book3s.h"
#include "trace.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -70,50 +69,6 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
- if (!is_kvmppc_hv_enabled(vcpu->kvm))
- return to_book3s(vcpu)->hior;
- return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
- unsigned long pending_now, unsigned long old_pending)
-{
- if (is_kvmppc_hv_enabled(vcpu->kvm))
- return;
- if (pending_now)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
- ulong crit_raw;
- ulong crit_r1;
- bool crit;
-
- if (is_kvmppc_hv_enabled(vcpu->kvm))
- return false;
-
- crit_raw = vcpu->arch.shared->critical;
- crit_r1 = kvmppc_get_gpr(vcpu, 1);
-
- /* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
- crit_raw &= 0xffffffff;
- crit_r1 &= 0xffffffff;
- }
-
- /* Critical section when crit == r1 */
- crit = (crit_raw == crit_r1);
- /* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
- return crit;
-}
-
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
@@ -171,32 +126,28 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
-EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
+
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
@@ -334,10 +285,8 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
return 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
- bool *writable)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
@@ -353,23 +302,20 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
get_page(pfn_to_page(pfn));
- if (writable)
- *writable = true;
return pfn;
}
- return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
+ return gfn_to_pfn(vcpu->kvm, gfn);
}
-EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
- bool iswrite, struct kvmppc_pte *pte)
+ struct kvmppc_pte *pte)
{
int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
int r;
if (relocated) {
- r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
+ r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
} else {
pte->eaddr = eaddr;
pte->raddr = eaddr & KVM_PAM;
@@ -415,7 +361,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu->stat.st++;
- if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
+ if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
return -ENOENT;
*eaddr = pte.raddr;
@@ -428,7 +374,6 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
-EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
@@ -438,7 +383,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu->stat.ld++;
- if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
+ if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
goto nopte;
*eaddr = pte.raddr;
@@ -459,7 +404,6 @@ nopte:
mmio:
return EMULATE_DO_MMIO;
}
-EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
@@ -475,18 +419,6 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
-}
-
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
@@ -563,7 +495,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
if (size > sizeof(val))
return -EINVAL;
- r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
+ r = kvmppc_get_one_reg(vcpu, reg->id, &val);
+
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
@@ -595,9 +528,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
}
val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
break;
- case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
- break;
#endif /* CONFIG_ALTIVEC */
case KVM_REG_PPC_DEBUG_INST: {
u32 opcode = INS_TW;
@@ -642,7 +572,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
return -EFAULT;
- r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
+ r = kvmppc_set_one_reg(vcpu, reg->id, &val);
+
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
@@ -674,13 +605,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
}
vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
break;
- case KVM_REG_PPC_VRSAVE:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
- break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
@@ -701,27 +625,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
return r;
}
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
-}
-
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
-}
-
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
-{
- vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
-}
-EXPORT_SYMBOL_GPL(kvmppc_set_msr);
-
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
- return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
-}
-
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
@@ -741,141 +644,3 @@ void kvmppc_decrementer_func(unsigned long data)
kvmppc_core_queue_dec(vcpu);
kvm_vcpu_kick(vcpu);
}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- return kvm->arch.kvm_ops->vcpu_create(kvm, id);
-}
-
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
-}
-
-int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
-{
- return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
-}
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
- return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
-}
-
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
-{
- kvm->arch.kvm_ops->free_memslot(free, dont);
-}
-
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return kvm->arch.kvm_ops->create_memslot(slot, npages);
-}
-
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
-{
- kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
-}
-
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem)
-{
- return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
-}
-
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old)
-{
- kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
-}
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_hva);
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return kvm->arch.kvm_ops->age_hva(kvm, hva);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
-}
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
-}
-
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
-int kvmppc_core_init_vm(struct kvm *kvm)
-{
-
-#ifdef CONFIG_PPC64
- INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
- INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-#endif
-
- return kvm->arch.kvm_ops->init_vm(kvm);
-}
-
-void kvmppc_core_destroy_vm(struct kvm *kvm)
-{
- kvm->arch.kvm_ops->destroy_vm(kvm);
-
-#ifdef CONFIG_PPC64
- kvmppc_rtas_tokens_free(kvm);
- WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
-#endif
-}
-
-int kvmppc_core_check_processor_compat(void)
-{
- /*
- * We always return 0 for book3s. We check
- * for compatability while loading the HV
- * or PR module
- */
- return 0;
-}
-
-static int kvmppc_book3s_init(void)
-{
- int r;
-
- r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
- if (r)
- return r;
-#ifdef CONFIG_KVM_BOOK3S_32
- r = kvmppc_book3s_init_pr();
-#endif
- return r;
-
-}
-
-static void kvmppc_book3s_exit(void)
-{
-#ifdef CONFIG_KVM_BOOK3S_32
- kvmppc_book3s_exit_pr();
-#endif
- kvm_exit();
-}
-
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
deleted file mode 100644
index 4bf956c..0000000
--- a/arch/powerpc/kvm/book3s.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright IBM Corporation, 2013
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your optional) any later version of the license.
- *
- */
-
-#ifndef __POWERPC_KVM_BOOK3S_H__
-#define __POWERPC_KVM_BOOK3S_H__
-
-extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
-extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
-extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
- unsigned long end);
-extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
-extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
-extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
-
-extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
-extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
- int sprn, ulong spr_val);
-extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
- int sprn, ulong *spr_val);
-extern int kvmppc_book3s_init_pr(void);
-extern void kvmppc_book3s_exit_pr(void);
-
-#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 76a64ce..c8cefdd 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,8 +84,7 @@ static inline bool sr_nx(u32 sr_raw)
}
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *pte, bool data,
- bool iswrite);
+ struct kvmppc_pte *pte, bool data);
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid);
@@ -100,7 +99,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
u64 vsid;
struct kvmppc_pte pte;
- if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
+ if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
return pte.vpage;
kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -112,11 +111,10 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, 0);
}
-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
u32 sre, gva_t eaddr,
bool primary)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u32 page, hash, pteg, htabmask;
hva_t r;
@@ -134,7 +132,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
sr_vsid(sre));
- r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
+ r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r))
return r;
return r | (pteg & ~PAGE_MASK);
@@ -147,8 +145,7 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
}
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *pte, bool data,
- bool iswrite)
+ struct kvmppc_pte *pte, bool data)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_bat *bat;
@@ -189,7 +186,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
printk(KERN_INFO "BAT is not readable!\n");
continue;
}
- if (iswrite && !pte->may_write) {
+ if (!pte->may_write) {
+ /* let's treat r/o BATs as not-readable for now */
dprintk_pte("BAT is read-only!\n");
continue;
}
@@ -203,8 +201,9 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data,
- bool iswrite, bool primary)
+ bool primary)
{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u32 sre;
hva_t ptegp;
u32 pteg[16];
@@ -219,7 +218,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
- ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
+ ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
if (kvm_is_error_hva(ptegp)) {
printk(KERN_INFO "KVM: Invalid PTEG!\n");
goto no_page_found;
@@ -259,6 +258,9 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
break;
}
+ if ( !pte->may_read )
+ continue;
+
dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
pteg[i], pteg[i+1], pp);
found = 1;
@@ -269,23 +271,19 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Update PTE C and A bits, so the guest's swapper knows we used the
page */
if (found) {
- u32 pte_r = pteg[i+1];
- char __user *addr = (char __user *) &pteg[i+1];
-
- /*
- * Use single-byte writes to update the HPTE, to
- * conform to what real hardware does.
- */
- if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
- pte_r |= PTEG_FLAG_ACCESSED;
- put_user(pte_r >> 8, addr + 2);
- }
- if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
- pte_r |= PTEG_FLAG_DIRTY;
- put_user(pte_r, addr + 3);
- }
- if (!pte->may_read || (iswrite && !pte->may_write))
- return -EPERM;
+ u32 oldpte = pteg[i+1];
+
+ if (pte->may_read)
+ pteg[i+1] |= PTEG_FLAG_ACCESSED;
+ if (pte->may_write)
+ pteg[i+1] |= PTEG_FLAG_DIRTY;
+ else
+ dprintk_pte("KVM: Mapping read-only page!\n");
+
+ /* Write back into the PTEG */
+ if (pteg[i+1] != oldpte)
+ copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+
return 0;
}
@@ -304,14 +302,12 @@ no_page_found:
}
static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *pte, bool data,
- bool iswrite)
+ struct kvmppc_pte *pte, bool data)
{
int r;
ulong mp_ea = vcpu->arch.magic_page_ea;
pte->eaddr = eaddr;
- pte->page_size = MMU_PAGE_4K;
/* Magic page override */
if (unlikely(mp_ea) &&
@@ -327,13 +323,11 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
- r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
+ r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
if (r < 0)
- r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
- data, iswrite, true);
+ r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
if (r < 0)
- r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
- data, iswrite, false);
+ r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
return r;
}
@@ -353,12 +347,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
- int i;
- struct kvm_vcpu *v;
-
- /* flush this VA on all cpus */
- kvm_for_each_vcpu(i, v, vcpu->kvm)
- kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
+ kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
}
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 3a0abd2..00e619b 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,8 +138,7 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
extern char etext[];
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
- bool iswrite)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
pfn_t hpaddr;
u64 vpn;
@@ -153,11 +152,9 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
bool evict = false;
struct hpte_cache *pte;
int r = 0;
- bool writable;
/* Get host physical address for gpa */
- hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
- iswrite, &writable);
+ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
if (is_error_noslot_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
@@ -207,7 +204,7 @@ next_pteg:
(primary ? 0 : PTE_SEC);
pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
- if (orig_pte->may_write && writable) {
+ if (orig_pte->may_write) {
pteg1 |= PP_RWRW;
mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
} else {
@@ -262,11 +259,6 @@ out:
return r;
}
-void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
-{
- kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
-}
-
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
struct kvmppc_sid_map *map;
@@ -349,7 +341,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
svcpu_put(svcpu);
}
-void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
int i;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 83da1f8..7e345e0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -107,20 +107,9 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
return kvmppc_slb_calc_vpn(slb, eaddr);
}
-static int mmu_pagesize(int mmu_pg)
-{
- switch (mmu_pg) {
- case MMU_PAGE_64K:
- return 16;
- case MMU_PAGE_16M:
- return 24;
- }
- return 12;
-}
-
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
- return mmu_pagesize(slbe->base_page_size);
+ return slbe->large ? 24 : 12;
}
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
@@ -130,11 +119,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}
-static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
+static hva_t kvmppc_mmu_book3s_64_get_pteg(
+ struct kvmppc_vcpu_book3s *vcpu_book3s,
struct kvmppc_slb *slbe, gva_t eaddr,
bool second)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u64 hash, pteg, htabsize;
u32 ssize;
hva_t r;
@@ -159,10 +148,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
/* When running a PAPR guest, SDR1 contains a HVA address instead
of a GPA */
- if (vcpu->arch.papr_enabled)
+ if (vcpu_book3s->vcpu.arch.papr_enabled)
r = pteg;
else
- r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
+ r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r))
return r;
@@ -177,38 +166,18 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
- if (p < 16)
- avpn >>= ((80 - p) - 56) - 8; /* 16 - p */
+ if (p < 24)
+ avpn >>= ((80 - p) - 56) - 8;
else
- avpn <<= p - 16;
+ avpn <<= 8;
return avpn;
}
-/*
- * Return page size encoded in the second word of a HPTE, or
- * -1 for an invalid encoding for the base page size indicated by
- * the SLB entry. This doesn't handle mixed pagesize segments yet.
- */
-static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
-{
- switch (slbe->base_page_size) {
- case MMU_PAGE_64K:
- if ((r & 0xf000) == 0x1000)
- return MMU_PAGE_64K;
- break;
- case MMU_PAGE_16M:
- if ((r & 0xff000) == 0)
- return MMU_PAGE_16M;
- break;
- }
- return -1;
-}
-
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data,
- bool iswrite)
+ struct kvmppc_pte *gpte, bool data)
{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
hva_t ptegp;
u64 pteg[16];
@@ -220,7 +189,6 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
u8 pp, key = 0;
bool found = false;
bool second = false;
- int pgsize;
ulong mp_ea = vcpu->arch.magic_page_ea;
/* Magic page override */
@@ -234,7 +202,6 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_execute = true;
gpte->may_read = true;
gpte->may_write = true;
- gpte->page_size = MMU_PAGE_4K;
return 0;
}
@@ -255,12 +222,8 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
HPTE_V_SECONDARY;
- pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
-
- mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-
do_second:
- ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
+ ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
if (kvm_is_error_hva(ptegp))
goto no_page_found;
@@ -277,13 +240,6 @@ do_second:
for (i=0; i<16; i+=2) {
/* Check all relevant fields of 1st dword */
if ((pteg[i] & v_mask) == v_val) {
- /* If large page bit is set, check pgsize encoding */
- if (slbe->large &&
- (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
- pgsize = decode_pagesize(slbe, pteg[i+1]);
- if (pgsize < 0)
- continue;
- }
found = true;
break;
}
@@ -300,15 +256,13 @@ do_second:
v = pteg[i];
r = pteg[i+1];
pp = (r & HPTE_R_PP) | key;
- if (r & HPTE_R_PP0)
- pp |= 8;
+ eaddr_mask = 0xFFF;
gpte->eaddr = eaddr;
gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
-
- eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
+ if (slbe->large)
+ eaddr_mask = 0xFFFFFF;
gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
- gpte->page_size = pgsize;
gpte->may_execute = ((r & HPTE_R_N) ? false : true);
gpte->may_read = false;
gpte->may_write = false;
@@ -323,7 +277,6 @@ do_second:
case 3:
case 5:
case 7:
- case 10:
gpte->may_read = true;
break;
}
@@ -334,37 +287,30 @@ do_second:
/* Update PTE R and C bits, so the guest's swapper knows we used the
* page */
- if (gpte->may_read && !(r & HPTE_R_R)) {
- /*
- * Set the accessed flag.
- * We have to write this back with a single byte write
- * because another vcpu may be accessing this on
- * non-PAPR platforms such as mac99, and this is
- * what real hardware does.
- */
- char __user *addr = (char __user *) &pteg[i+1];
+ if (gpte->may_read) {
+ /* Set the accessed flag */
r |= HPTE_R_R;
- put_user(r >> 8, addr + 6);
}
- if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
- /* Set the dirty flag */
- /* Use a single byte write */
- char __user *addr = (char __user *) &pteg[i+1];
+ if (data && gpte->may_write) {
+ /* Set the dirty flag -- XXX even if not writing */
r |= HPTE_R_C;
- put_user(r, addr + 7);
}
- mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+ /* Write back into the PTEG */
+ if (pteg[i+1] != r) {
+ pteg[i+1] = r;
+ copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+ }
- if (!gpte->may_read || (iswrite && !gpte->may_write))
+ if (!gpte->may_read)
return -EPERM;
return 0;
no_page_found:
- mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
return -ENOENT;
no_seg_found:
+
dprintk("KVM MMU: Trigger segment fault\n");
return -EINVAL;
}
@@ -399,21 +345,6 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
- slbe->base_page_size = MMU_PAGE_4K;
- if (slbe->large) {
- if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
- switch (rs & SLB_VSID_LP) {
- case SLB_VSID_LP_00:
- slbe->base_page_size = MMU_PAGE_16M;
- break;
- case SLB_VSID_LP_01:
- slbe->base_page_size = MMU_PAGE_64K;
- break;
- }
- } else
- slbe->base_page_size = MMU_PAGE_16M;
- }
-
slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
slbe->origv = rs;
@@ -529,45 +460,14 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
bool large)
{
u64 mask = 0xFFFFFFFFFULL;
- long i;
- struct kvm_vcpu *v;
dprintk("KVM MMU: tlbie(0x%lx)\n", va);
- /*
- * The tlbie instruction changed behaviour starting with
- * POWER6. POWER6 and later don't have the large page flag
- * in the instruction but in the RB value, along with bits
- * indicating page and segment sizes.
- */
- if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
- /* POWER6 or later */
- if (va & 1) { /* L bit */
- if ((va & 0xf000) == 0x1000)
- mask = 0xFFFFFFFF0ULL; /* 64k page */
- else
- mask = 0xFFFFFF000ULL; /* 16M page */
- }
- } else {
- /* older processors, e.g. PPC970 */
- if (large)
- mask = 0xFFFFFF000ULL;
- }
- /* flush this VA on all vcpus */
- kvm_for_each_vcpu(i, v, vcpu->kvm)
- kvmppc_mmu_pte_vflush(v, va >> 12, mask);
+ if (large)
+ mask = 0xFFFFFF000ULL;
+ kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}
-#ifdef CONFIG_PPC_64K_PAGES
-static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
-{
- ulong mp_ea = vcpu->arch.magic_page_ea;
-
- return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
- (mp_ea >> SID_SHIFT) == esid;
-}
-#endif
-
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
@@ -575,13 +475,11 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
struct kvmppc_slb *slb;
u64 gvsid = esid;
ulong mp_ea = vcpu->arch.magic_page_ea;
- int pagesize = MMU_PAGE_64K;
if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (slb) {
gvsid = slb->vsid;
- pagesize = slb->base_page_size;
if (slb->tb) {
gvsid <<= SID_SHIFT_1T - SID_SHIFT;
gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -592,41 +490,28 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
case 0:
- gvsid = VSID_REAL | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- gvsid |= VSID_REAL_IR;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- gvsid |= VSID_REAL_DR;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
if (!slb)
goto no_slb;
+ *vsid = gvsid;
break;
default:
BUG();
break;
}
-#ifdef CONFIG_PPC_64K_PAGES
- /*
- * Mark this as a 64k segment if the host is using
- * 64k pages, the host MMU supports 64k pages and
- * the guest segment page size is >= 64k,
- * but not if this segment contains the magic page.
- */
- if (pagesize >= MMU_PAGE_64K &&
- mmu_psize_defs[MMU_PAGE_64K].shift &&
- !segment_contains_magic_page(vcpu, esid))
- gvsid |= VSID_64K;
-#endif
-
if (vcpu->arch.shared->msr & MSR_PR)
- gvsid |= VSID_PR;
+ *vsid |= VSID_PR;
- *vsid = gvsid;
return 0;
no_slb:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0d513af..e524052 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -27,14 +27,14 @@
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
-#include "trace_pr.h"
+#include "trace.h"
#define PTE_SIZE 12
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
- pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
+ MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
false);
}
@@ -78,8 +78,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
return NULL;
}
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
- bool iswrite)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
unsigned long vpn;
pfn_t hpaddr;
@@ -91,26 +90,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
int attempt = 0;
struct kvmppc_sid_map *map;
int r = 0;
- int hpsize = MMU_PAGE_4K;
- bool writable;
- unsigned long mmu_seq;
- struct kvm *kvm = vcpu->kvm;
- struct hpte_cache *cpte;
- unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
- unsigned long pfn;
-
- /* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
- smp_rmb();
/* Get host physical address for gpa */
- pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
- if (is_error_noslot_pfn(pfn)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
+ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+ if (is_error_noslot_pfn(hpaddr)) {
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
r = -EINVAL;
goto out;
}
- hpaddr = pfn << PAGE_SHIFT;
+ hpaddr <<= PAGE_SHIFT;
+ hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
/* and write the mapping ea -> hpa into the pt */
vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -128,39 +117,20 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
goto out;
}
- vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
+ vsid = map->host_vsid;
+ vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
- kvm_set_pfn_accessed(pfn);
- if (!orig_pte->may_write || !writable)
- rflags |= PP_RXRX;
- else {
- mark_page_dirty(vcpu->kvm, gfn);
- kvm_set_pfn_dirty(pfn);
- }
+ if (!orig_pte->may_write)
+ rflags |= HPTE_R_PP;
+ else
+ mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (!orig_pte->may_execute)
rflags |= HPTE_R_N;
else
- kvmppc_mmu_flush_icache(pfn);
-
- /*
- * Use 64K pages if possible; otherwise, on 64K page kernels,
- * we need to transfer 4 more bits from guest real to host real addr.
- */
- if (vsid & VSID_64K)
- hpsize = MMU_PAGE_64K;
- else
- hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
-
- hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
- cpte = kvmppc_mmu_hpte_cache_next(vcpu);
-
- spin_lock(&kvm->mmu_lock);
- if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
- r = -EAGAIN;
- goto out_unlock;
- }
+ hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
map_again:
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -169,11 +139,11 @@ map_again:
if (attempt > 1)
if (ppc_md.hpte_remove(hpteg) < 0) {
r = -1;
- goto out_unlock;
+ goto out;
}
ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
- hpsize, hpsize, MMU_SEGSIZE_256M);
+ MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
@@ -182,6 +152,8 @@ map_again:
attempt++;
goto map_again;
} else {
+ struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
+
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
@@ -192,37 +164,19 @@ map_again:
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
}
- cpte->slot = hpteg + (ret & 7);
- cpte->host_vpn = vpn;
- cpte->pte = *orig_pte;
- cpte->pfn = pfn;
- cpte->pagesize = hpsize;
+ pte->slot = hpteg + (ret & 7);
+ pte->host_vpn = vpn;
+ pte->pte = *orig_pte;
+ pte->pfn = hpaddr >> PAGE_SHIFT;
- kvmppc_mmu_hpte_cache_map(vcpu, cpte);
- cpte = NULL;
+ kvmppc_mmu_hpte_cache_map(vcpu, pte);
}
-
-out_unlock:
- spin_unlock(&kvm->mmu_lock);
- kvm_release_pfn_clean(pfn);
- if (cpte)
- kvmppc_mmu_hpte_cache_free(cpte);
+ kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
return r;
}
-void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
-{
- u64 mask = 0xfffffffffULL;
- u64 vsid;
-
- vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
- if (vsid & VSID_64K)
- mask = 0xffffffff0ULL;
- kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
-}
-
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
struct kvmppc_sid_map *map;
@@ -337,12 +291,6 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
slb_vsid &= ~SLB_VSID_KP;
slb_esid |= slb_index;
-#ifdef CONFIG_PPC_64K_PAGES
- /* Set host segment base page size to 64K if possible */
- if (gvsid & VSID_64K)
- slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
-#endif
-
svcpu->slb[slb_index].esid = slb_esid;
svcpu->slb[slb_index].vsid = slb_vsid;
@@ -378,7 +326,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
svcpu_put(svcpu);
}
-void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_hpte_destroy(vcpu);
__destroy_context(to_book3s(vcpu)->context_id[0]);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587..043eec8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -260,6 +260,10 @@ int kvmppc_mmu_hv_init(void)
return 0;
}
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+}
+
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
@@ -447,7 +451,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
}
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data, bool iswrite)
+ struct kvmppc_pte *gpte, bool data)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_slb *slbe;
@@ -902,22 +906,21 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
return 0;
}
-int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
if (kvm->arch.using_mmu_notifiers)
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
return 0;
}
-int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
if (kvm->arch.using_mmu_notifiers)
kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
return 0;
}
-void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
unsigned long *rmapp;
unsigned long gfn;
@@ -991,7 +994,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
return ret;
}
-int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
@@ -1029,14 +1032,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
return ret;
}
-int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
-void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
if (!kvm->arch.using_mmu_notifiers)
return;
@@ -1509,8 +1512,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr = senc << (LPCR_VRMASD_SH - 4);
- kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+ lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
+ lpcr |= senc << (LPCR_VRMASD_SH - 4);
+ kvm->arch.lpcr = lpcr;
rma_setup = 1;
}
++i;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 2c25f54..30c2f3b 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,4 +74,3 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* Didn't find the liobn, punt it to userspace */
return H_TOO_HARD;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 99d40f8..360ce68 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
return true;
}
-int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rt = get_rt(inst);
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_64_PR
case OP_31_XOP_FAKE_SC1:
{
/* SC 1 papr hypercalls */
@@ -267,9 +267,12 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_st(vcpu, &addr, 32, zeros, true);
if ((r == -ENOENT) || (r == -EPERM)) {
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
+
+ svcpu = svcpu_get(vcpu);
*advance = 0;
vcpu->arch.shared->dar = vaddr;
- vcpu->arch.fault_dar = vaddr;
+ svcpu->fault_dar = vaddr;
dsisr = DSISR_ISSTORE;
if (r == -ENOENT)
@@ -278,7 +281,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->dsisr = dsisr;
- vcpu->arch.fault_dsisr = dsisr;
+ svcpu->fault_dsisr = dsisr;
+ svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -345,7 +349,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
return bat;
}
-int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
@@ -468,7 +472,7 @@ unprivileged:
return emulated;
}
-int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 852989a..7057a02 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,10 +20,9 @@
#include <linux/export.h>
#include <asm/kvm_book3s.h>
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_64_HV
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
-#endif
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+#else
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f..62a2b5a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -52,9 +52,6 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
-#include <linux/module.h>
-
-#include "book3s.h"
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
@@ -69,7 +66,7 @@
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
+void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
int me;
int cpu = vcpu->cpu;
@@ -128,7 +125,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
* purely defensive; they should never fail.)
*/
-static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -146,7 +143,7 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
spin_unlock(&vcpu->arch.tbacct_lock);
}
-static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -158,46 +155,17 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->arch.tbacct_lock);
}
-static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
vcpu->arch.shregs.msr = msr;
kvmppc_end_cede(vcpu);
}
-void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
vcpu->arch.pvr = pvr;
}
-int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
-{
- unsigned long pcr = 0;
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
-
- if (arch_compat) {
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return -EINVAL; /* 970 has no compat mode support */
-
- switch (arch_compat) {
- case PVR_ARCH_205:
- pcr = PCR_ARCH_205;
- break;
- case PVR_ARCH_206:
- case PVR_ARCH_206p:
- break;
- default:
- return -EINVAL;
- }
- }
-
- spin_lock(&vc->lock);
- vc->arch_compat = arch_compat;
- vc->pcr = pcr;
- spin_unlock(&vc->lock);
-
- return 0;
-}
-
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
int r;
@@ -227,7 +195,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
pr_err(" ESID = %.16llx VSID = %.16llx\n",
vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
- vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
+ vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
vcpu->arch.last_inst);
}
@@ -521,7 +489,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
memset(dt, 0, sizeof(struct dtl_entry));
dt->dispatch_reason = 7;
dt->processor_id = vc->pcpu + vcpu->arch.ptid;
- dt->timebase = now + vc->tb_offset;
+ dt->timebase = now;
dt->enqueue_to_dispatch_time = stolen;
dt->srr0 = kvmppc_get_pc(vcpu);
dt->srr1 = vcpu->arch.shregs.msr;
@@ -570,15 +538,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
}
break;
case H_CONFER:
- target = kvmppc_get_gpr(vcpu, 4);
- if (target == -1)
- break;
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
- if (!tvcpu) {
- ret = H_PARAMETER;
- break;
- }
- kvm_vcpu_yield_to(tvcpu);
break;
case H_REGISTER_VPA:
ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
@@ -617,8 +576,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
- struct task_struct *tsk)
+static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ struct task_struct *tsk)
{
int r = RESUME_HOST;
@@ -712,16 +671,16 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu->arch.shregs.msr);
- run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST;
+ BUG();
break;
}
return r;
}
-static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
int i;
@@ -735,12 +694,12 @@ static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
return 0;
}
-static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
int i, j;
- kvmppc_set_pvr_hv(vcpu, sregs->pvr);
+ kvmppc_set_pvr(vcpu, sregs->pvr);
j = 0;
for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -755,23 +714,7 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
-{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- u64 mask;
-
- spin_lock(&vc->lock);
- /*
- * Userspace can only modify DPFD (default prefetch depth),
- * ILE (interrupt little-endian) and TC (translation control).
- */
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
- vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
- spin_unlock(&vc->lock);
-}
-
-static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
int r = 0;
long int i;
@@ -806,12 +749,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
i = id - KVM_REG_PPC_PMC1;
*val = get_reg_val(id, vcpu->arch.pmc[i]);
break;
- case KVM_REG_PPC_SIAR:
- *val = get_reg_val(id, vcpu->arch.siar);
- break;
- case KVM_REG_PPC_SDAR:
- *val = get_reg_val(id, vcpu->arch.sdar);
- break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -850,18 +787,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
val->vpaval.length = vcpu->arch.dtl.len;
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
- case KVM_REG_PPC_TB_OFFSET:
- *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
- break;
- case KVM_REG_PPC_LPCR:
- *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
- break;
- case KVM_REG_PPC_PPR:
- *val = get_reg_val(id, vcpu->arch.ppr);
- break;
- case KVM_REG_PPC_ARCH_COMPAT:
- *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
- break;
default:
r = -EINVAL;
break;
@@ -870,8 +795,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
return r;
}
-static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
int r = 0;
long int i;
@@ -909,12 +833,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
i = id - KVM_REG_PPC_PMC1;
vcpu->arch.pmc[i] = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_SIAR:
- vcpu->arch.siar = set_reg_val(id, *val);
- break;
- case KVM_REG_PPC_SDAR:
- vcpu->arch.sdar = set_reg_val(id, *val);
- break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -962,20 +880,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
len -= len % sizeof(struct dtl_entry);
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
- case KVM_REG_PPC_TB_OFFSET:
- /* round up to multiple of 2^24 */
- vcpu->arch.vcore->tb_offset =
- ALIGN(set_reg_val(id, *val), 1UL << 24);
- break;
- case KVM_REG_PPC_LPCR:
- kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
- break;
- case KVM_REG_PPC_PPR:
- vcpu->arch.ppr = set_reg_val(id, *val);
- break;
- case KVM_REG_PPC_ARCH_COMPAT:
- r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
- break;
default:
r = -EINVAL;
break;
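
This hunk also drops the KVM_REG_PPC_TB_OFFSET, KVM_REG_PPC_LPCR, KVM_REG_PPC_PPR and KVM_REG_PPC_ARCH_COMPAT IDs (note how TB_OFFSET was rounded up to a multiple of 2^24 before use), so they now fall through to the default: case and return -EINVAL. A minimal userspace sketch of the ONE_REG interface these handlers back, assuming an open vcpu fd and the 3.13 uapi headers that still define the ID:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_lpcr(int vcpu_fd, uint64_t lpcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,	/* removed by this revert: expect EINVAL */
		.addr = (uintptr_t)&lpcr,
	};
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* 0 on success */
}
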
@@ -984,8 +888,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
return r;
}
-static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
- unsigned int id)
+int kvmppc_core_check_processor_compat(void)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+ return -EIO;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
int err = -EINVAL;
@@ -1009,7 +919,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
vcpu->arch.mmcr[0] = MMCR0_FC;
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
- kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
@@ -1029,7 +940,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
spin_lock_init(&vcore->lock);
init_waitqueue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
- vcore->lpcr = kvm->arch.lpcr;
}
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
@@ -1062,7 +972,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
vpa->dirty);
}
-static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.vpa_update_lock);
unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
@@ -1073,12 +983,6 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
-static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
-{
- /* Indicate we want to get back into the guest */
- return 1;
-}
-
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
unsigned long dec_nsec, now;
@@ -1360,8 +1264,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
ret = RESUME_GUEST;
if (vcpu->arch.trap)
- ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
- vcpu->arch.run_task);
+ ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
+ vcpu->arch.run_task);
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
@@ -1520,7 +1424,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return vcpu->arch.ret;
}
-static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
@@ -1642,8 +1546,7 @@ static const struct file_operations kvm_rma_fops = {
.release = kvm_rma_release,
};
-static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
- struct kvm_allocate_rma *ret)
+long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
long fd;
struct kvm_rma_info *ri;
@@ -1689,8 +1592,7 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
(*sps)++;
}
-static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
- struct kvm_ppc_smmu_info *info)
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
struct kvm_ppc_one_seg_page_size *sps;
@@ -1711,8 +1613,7 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
- struct kvm_dirty_log *log)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
struct kvm_memory_slot *memslot;
int r;
@@ -1766,8 +1667,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot)
}
}
-static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
{
if (!dont || free->arch.rmap != dont->arch.rmap) {
vfree(free->arch.rmap);
@@ -1780,8 +1681,8 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
}
}
-static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
- unsigned long npages)
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
if (!slot->arch.rmap)
@@ -1791,9 +1692,9 @@ static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
return 0;
}
-static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem)
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem)
{
unsigned long *phys;
@@ -1809,9 +1710,9 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
return 0;
}
-static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old)
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
@@ -1828,37 +1729,6 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
}
}
-/*
- * Update LPCR values in kvm->arch and in vcores.
- * Caller must hold kvm->lock.
- */
-void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
-{
- long int i;
- u32 cores_done = 0;
-
- if ((kvm->arch.lpcr & mask) == lpcr)
- return;
-
- kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
-
- for (i = 0; i < KVM_MAX_VCORES; ++i) {
- struct kvmppc_vcore *vc = kvm->arch.vcores[i];
- if (!vc)
- continue;
- spin_lock(&vc->lock);
- vc->lpcr = (vc->lpcr & ~mask) | lpcr;
- spin_unlock(&vc->lock);
- if (++cores_done >= kvm->arch.online_vcores)
- break;
- }
-}
-
-static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
-{
- return;
-}
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
@@ -1867,8 +1737,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
- unsigned long lpcr = 0, senc;
- unsigned long lpcr_mask = 0;
+ unsigned long lpcr, senc;
unsigned long psize, porder;
unsigned long rma_size;
unsigned long rmls;
@@ -1933,9 +1802,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
senc = slb_pgsize_encoding(psize);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr_mask = LPCR_VRMASD;
- /* the -4 is to account for senc values starting at 0x10 */
- lpcr = senc << (LPCR_VRMASD_SH - 4);
+ lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
+ lpcr |= senc << (LPCR_VRMASD_SH - 4);
+ kvm->arch.lpcr = lpcr;
/* Create HPTEs in the hash page table for the VRMA */
kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1956,21 +1825,23 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
kvm->arch.rma = ri;
/* Update LPCR and RMOR */
+ lpcr = kvm->arch.lpcr;
if (cpu_has_feature(CPU_FTR_ARCH_201)) {
/* PPC970; insert RMLS value (split field) in HID4 */
- lpcr_mask = (1ul << HID4_RMLS0_SH) |
- (3ul << HID4_RMLS2_SH) | HID4_RMOR;
- lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
+ lpcr &= ~((1ul << HID4_RMLS0_SH) |
+ (3ul << HID4_RMLS2_SH));
+ lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
((rmls & 3) << HID4_RMLS2_SH);
/* RMOR is also in HID4 */
lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
<< HID4_RMOR_SH;
} else {
/* POWER7 */
- lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
- lpcr = rmls << LPCR_RMLS_SH;
+ lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
+ lpcr |= rmls << LPCR_RMLS_SH;
kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
}
+ kvm->arch.lpcr = lpcr;
pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
@@ -1989,8 +1860,6 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
}
}
- kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
-
/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
smp_wmb();
kvm->arch.rma_setup_done = 1;
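
The smp_wmb() above publishes the LPCR/SDR1/RMA stores before the rma_setup_done flag, which only works if readers order their loads against the flag as well. A reader-side sketch, assuming the caller either issues the matching barrier as below or re-checks under kvm->lock as kvmppc_hv_setup_htab_rma() itself does:

static bool rma_ready(struct kvm *kvm)
{
	if (!kvm->arch.rma_setup_done)
		return false;
	smp_rmb();	/* pairs with the smp_wmb() above */
	return true;	/* kvm->arch.lpcr, sdr1, rma are now stable */
}
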
@@ -2006,7 +1875,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
goto out_srcu;
}
-static int kvmppc_core_init_vm_hv(struct kvm *kvm)
+int kvmppc_core_init_vm(struct kvm *kvm)
{
unsigned long lpcr, lpid;
@@ -2024,6 +1893,9 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
*/
cpumask_setall(&kvm->arch.need_tlb_flush);
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+
kvm->arch.rma = NULL;
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2059,162 +1931,61 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
return 0;
}
-static void kvmppc_free_vcores(struct kvm *kvm)
-{
- long int i;
-
- for (i = 0; i < KVM_MAX_VCORES; ++i)
- kfree(kvm->arch.vcores[i]);
- kvm->arch.online_vcores = 0;
-}
-
-static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
+void kvmppc_core_destroy_vm(struct kvm *kvm)
{
uninhibit_secondary_onlining();
- kvmppc_free_vcores(kvm);
if (kvm->arch.rma) {
kvm_release_rma(kvm->arch.rma);
kvm->arch.rma = NULL;
}
+ kvmppc_rtas_tokens_free(kvm);
+
kvmppc_free_hpt(kvm);
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
-/* We don't need to emulate any privileged instructions or dcbz */
-static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+/* These are stubs for now */
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
- return EMULATE_FAIL;
}
-static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
- ulong spr_val)
+/* We don't need to emulate any privileged instructions or dcbz */
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
return EMULATE_FAIL;
}
-static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
- ulong *spr_val)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
return EMULATE_FAIL;
}
-static int kvmppc_core_check_processor_compat_hv(void)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EIO;
- return 0;
+ return EMULATE_FAIL;
}
-static long kvm_arch_vm_ioctl_hv(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+static int kvmppc_book3s_hv_init(void)
{
- struct kvm *kvm __maybe_unused = filp->private_data;
- void __user *argp = (void __user *)arg;
- long r;
-
- switch (ioctl) {
-
- case KVM_ALLOCATE_RMA: {
- struct kvm_allocate_rma rma;
- struct kvm *kvm = filp->private_data;
-
- r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
- if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
- r = -EFAULT;
- break;
- }
-
- case KVM_PPC_ALLOCATE_HTAB: {
- u32 htab_order;
-
- r = -EFAULT;
- if (get_user(htab_order, (u32 __user *)argp))
- break;
- r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
- if (r)
- break;
- r = -EFAULT;
- if (put_user(htab_order, (u32 __user *)argp))
- break;
- r = 0;
- break;
- }
-
- case KVM_PPC_GET_HTAB_FD: {
- struct kvm_get_htab_fd ghf;
-
- r = -EFAULT;
- if (copy_from_user(&ghf, argp, sizeof(ghf)))
- break;
- r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
- break;
- }
-
- default:
- r = -ENOTTY;
- }
-
- return r;
-}
+ int r;
-static struct kvmppc_ops kvm_ops_hv = {
- .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
- .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
- .get_one_reg = kvmppc_get_one_reg_hv,
- .set_one_reg = kvmppc_set_one_reg_hv,
- .vcpu_load = kvmppc_core_vcpu_load_hv,
- .vcpu_put = kvmppc_core_vcpu_put_hv,
- .set_msr = kvmppc_set_msr_hv,
- .vcpu_run = kvmppc_vcpu_run_hv,
- .vcpu_create = kvmppc_core_vcpu_create_hv,
- .vcpu_free = kvmppc_core_vcpu_free_hv,
- .check_requests = kvmppc_core_check_requests_hv,
- .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
- .flush_memslot = kvmppc_core_flush_memslot_hv,
- .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
- .commit_memory_region = kvmppc_core_commit_memory_region_hv,
- .unmap_hva = kvm_unmap_hva_hv,
- .unmap_hva_range = kvm_unmap_hva_range_hv,
- .age_hva = kvm_age_hva_hv,
- .test_age_hva = kvm_test_age_hva_hv,
- .set_spte_hva = kvm_set_spte_hva_hv,
- .mmu_destroy = kvmppc_mmu_destroy_hv,
- .free_memslot = kvmppc_core_free_memslot_hv,
- .create_memslot = kvmppc_core_create_memslot_hv,
- .init_vm = kvmppc_core_init_vm_hv,
- .destroy_vm = kvmppc_core_destroy_vm_hv,
- .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
- .emulate_op = kvmppc_core_emulate_op_hv,
- .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
- .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
- .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
- .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
-};
+ r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-static int kvmppc_book3s_init_hv(void)
-{
- int r;
- /*
- * FIXME!! Do we need to check on all cpus ?
- */
- r = kvmppc_core_check_processor_compat_hv();
- if (r < 0)
+ if (r)
return r;
- kvm_ops_hv.owner = THIS_MODULE;
- kvmppc_hv_ops = &kvm_ops_hv;
-
r = kvmppc_mmu_hv_init();
+
return r;
}
-static void kvmppc_book3s_exit_hv(void)
+static void kvmppc_book3s_hv_exit(void)
{
- kvmppc_hv_ops = NULL;
+ kvm_exit();
}
-module_init(kvmppc_book3s_init_hv);
-module_exit(kvmppc_book3s_exit_hv);
-MODULE_LICENSE("GPL");
+module_init(kvmppc_book3s_hv_init);
+module_exit(kvmppc_book3s_hv_exit);
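
For context on what the deletions in this file undo: the 3.13 code registered a per-implementation kvmppc_ops table so the HV and PR backends could be built into one kernel and selected per VM; this revert returns to each backend exporting the strong symbols directly. A rough sketch of the removed indirection, mirroring the wrapper style of the 3.13 powerpc.c rather than any code in this tree:

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* dispatch through the table chosen at VM creation */
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);	/* kvm_ops_hv or kvm_ops_pr */
}
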
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 928142c..37f1cc4 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -158,6 +158,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
* Interrupts are enabled again at this point.
*/
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
/*
* Register usage at this point:
*
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a03d8f9..c71103b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,6 +33,30 @@
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
+/*****************************************************************************
+ * *
+ * Real Mode handlers that need to be in the linear mapping *
+ * *
+ ****************************************************************************/
+
+ .globl kvmppc_skip_interrupt
+kvmppc_skip_interrupt:
+ mfspr r13,SPRN_SRR0
+ addi r13,r13,4
+ mtspr SPRN_SRR0,r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+ .globl kvmppc_skip_Hinterrupt
+kvmppc_skip_Hinterrupt:
+ mfspr r13,SPRN_HSRR0
+ addi r13,r13,4
+ mtspr SPRN_HSRR0,r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -42,11 +66,8 @@
* LR = return address to continue at after eventually re-enabling MMU
*/
_GLOBAL(kvmppc_hv_entry_trampoline)
- mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -112(r1)
mfmsr r10
- LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
+ LOAD_REG_ADDR(r5, kvmppc_hv_entry)
li r0,MSR_RI
andc r0,r10,r0
li r6,MSR_IR | MSR_DR
@@ -56,103 +77,11 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
mtsrr1 r6
RFI
-kvmppc_call_hv_entry:
- bl kvmppc_hv_entry
-
- /* Back from guest - restore host state and return to caller */
-
- /* Restore host DABR and DABRX */
- ld r5,HSTATE_DABR(r13)
- li r6,7
- mtspr SPRN_DABR,r5
- mtspr SPRN_DABRX,r6
-
- /* Restore SPRG3 */
- ld r3,PACA_SPRG_VDSO(r13)
- mtspr SPRN_SPRG_VDSO_WRITE,r3
-
- /*
- * Reload DEC. HDEC interrupts were disabled when
- * we reloaded the host's LPCR value.
- */
- ld r3, HSTATE_DECEXP(r13)
- mftb r4
- subf r4, r4, r3
- mtspr SPRN_DEC, r4
-
- /* Reload the host's PMU registers */
- ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
- lbz r4, LPPACA_PMCINUSE(r3)
- cmpwi r4, 0
- beq 23f /* skip if not */
- lwz r3, HSTATE_PMC(r13)
- lwz r4, HSTATE_PMC + 4(r13)
- lwz r5, HSTATE_PMC + 8(r13)
- lwz r6, HSTATE_PMC + 12(r13)
- lwz r8, HSTATE_PMC + 16(r13)
- lwz r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- lwz r10, HSTATE_PMC + 24(r13)
- lwz r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r4
- mtspr SPRN_PMC3, r5
- mtspr SPRN_PMC4, r6
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- ld r3, HSTATE_MMCR(r13)
- ld r4, HSTATE_MMCR + 8(r13)
- ld r5, HSTATE_MMCR + 16(r13)
- mtspr SPRN_MMCR1, r4
- mtspr SPRN_MMCRA, r5
- mtspr SPRN_MMCR0, r3
- isync
-23:
-
- /*
- * For external and machine check interrupts, we need
- * to call the Linux handler to process the interrupt.
- * We do that by jumping to absolute address 0x500 for
- * external interrupts, or the machine_check_fwnmi label
- * for machine checks (since firmware might have patched
- * the vector area at 0x200). The [h]rfid at the end of the
- * handler will return to the book3s_hv_interrupts.S code.
- * For other interrupts we do the rfid to get back
- * to the book3s_hv_interrupts.S code here.
- */
- ld r8, 112+PPC_LR_STKOFF(r1)
- addi r1, r1, 112
- ld r7, HSTATE_HOST_MSR(r13)
-
- cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
- cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
- beq 11f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
- /* RFI into the highmem handler, or branch to interrupt handler */
- mfmsr r6
- li r0, MSR_RI
- andc r6, r6, r0
- mtmsrd r6, 1 /* Clear RI in MSR */
- mtsrr0 r8
- mtsrr1 r7
- beqa 0x500 /* external interrupt (PPC970) */
- beq cr1, 13f /* machine check */
- RFI
-
- /* On POWER7, we have external interrupts set to use HSRR0/1 */
-11: mtspr SPRN_HSRR0, r8
- mtspr SPRN_HSRR1, r7
- ba 0x500
-
-13: b machine_check_fwnmi
-
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
/*
* We come in here when wakened from nap mode on a secondary hw thread.
@@ -208,7 +137,7 @@ kvm_start_guest:
cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
beq kvm_no_guest
- b 30f
+ b kvmppc_hv_entry
27: /* XXX should handle hypervisor maintenance interrupts etc. here */
b kvm_no_guest
@@ -218,57 +147,6 @@ kvm_start_guest:
stw r8,HSTATE_SAVED_XIRR(r13)
b kvm_no_guest
-30: bl kvmppc_hv_entry
-
- /* Back from the guest, go back to nap */
- /* Clear our vcpu pointer so we don't come back in early */
- li r0, 0
- std r0, HSTATE_KVM_VCPU(r13)
- lwsync
- /* Clear any pending IPI - we're an offline thread */
- ld r5, HSTATE_XICS_PHYS(r13)
- li r7, XICS_XIRR
- lwzcix r3, r5, r7 /* ack any pending interrupt */
- rlwinm. r0, r3, 0, 0xffffff /* any pending? */
- beq 37f
- sync
- li r0, 0xff
- li r6, XICS_MFRR
- stbcix r0, r5, r6 /* clear the IPI */
- stwcix r3, r5, r7 /* EOI it */
-37: sync
-
- /* increment the nap count and then go to nap mode */
- ld r4, HSTATE_KVM_VCORE(r13)
- addi r4, r4, VCORE_NAP_COUNT
- lwsync /* make previous updates visible */
-51: lwarx r3, 0, r4
- addi r3, r3, 1
- stwcx. r3, 0, r4
- bne 51b
-
-kvm_no_guest:
- li r0, KVM_HWTHREAD_IN_NAP
- stb r0, HSTATE_HWTHREAD_STATE(r13)
- li r3, LPCR_PECE0
- mfspr r4, SPRN_LPCR
- rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
- mtspr SPRN_LPCR, r4
- isync
- std r0, HSTATE_SCRATCH0(r13)
- ptesync
- ld r0, HSTATE_SCRATCH0(r13)
-1: cmpd r0, r0
- bne 1b
- nap
- b .
-
-/******************************************************************************
- * *
- * Entry code *
- * *
- *****************************************************************************/
-
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -281,8 +159,7 @@ kvmppc_hv_entry:
* all other volatile GPRS = free
*/
mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -112(r1)
+ std r0, HSTATE_VMHANDLER(r13)
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -323,12 +200,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4)
ld r6, VCPU_MMCR + 16(r4)
- ld r7, VCPU_SIAR(r4)
- ld r8, VCPU_SDAR(r4)
mtspr SPRN_MMCR1, r5
mtspr SPRN_MMCRA, r6
- mtspr SPRN_SIAR, r7
- mtspr SPRN_SDAR, r8
mtspr SPRN_MMCR0, r3
isync
@@ -381,15 +254,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
+ /* Increment yield count if they have a VPA */
+ ld r3, VCPU_VPA(r4)
+ cmpdi r3, 0
+ beq 25f
+ lwz r5, LPPACA_YIELDCOUNT(r3)
+ addi r5, r5, 1
+ stw r5, LPPACA_YIELDCOUNT(r3)
+ li r6, 1
+ stb r6, VCPU_VPA_DIRTY(r4)
+25:
/* Load up DAR and DSISR */
ld r5, VCPU_DAR(r4)
lwz r6, VCPU_DSISR(r4)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
- li r6, KVM_GUEST_MODE_HOST_HV
- stb r6, HSTATE_IN_GUEST(r13)
-
BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
@@ -463,28 +343,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
bdnz 28b
ptesync
- /* Add timebase offset onto timebase */
-22: ld r8,VCORE_TB_OFFSET(r5)
- cmpdi r8,0
- beq 37f
- mftb r6 /* current host timebase */
- add r8,r8,r6
- mtspr SPRN_TBU40,r8 /* update upper 40 bits */
- mftb r7 /* check if lower 24 bits overflowed */
- clrldi r6,r6,40
- clrldi r7,r7,40
- cmpld r7,r6
- bge 37f
- addis r8,r8,0x100 /* if so, increment upper 40 bits */
- mtspr SPRN_TBU40,r8
-
- /* Load guest PCR value to select appropriate compat mode */
-37: ld r7, VCORE_PCR(r5)
- cmpdi r7, 0
- beq 38f
- mtspr SPRN_PCR, r7
-38:
- li r0,1
+22: li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
b 10f
@@ -494,22 +353,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
beq 20b
/* Set LPCR and RMOR. */
-10: ld r8,VCORE_LPCR(r5)
+10: ld r8,KVM_LPCR(r9)
mtspr SPRN_LPCR,r8
ld r8,KVM_RMOR(r9)
mtspr SPRN_RMOR,r8
isync
- /* Increment yield count if they have a VPA */
- ld r3, VCPU_VPA(r4)
- cmpdi r3, 0
- beq 25f
- lwz r5, LPPACA_YIELDCOUNT(r3)
- addi r5, r5, 1
- stw r5, LPPACA_YIELDCOUNT(r3)
- li r6, 1
- stb r6, VCPU_VPA_DIRTY(r4)
-25:
/* Check if HDEC expires soon */
mfspr r3,SPRN_HDEC
cmpwi r3,10
@@ -556,8 +405,7 @@ toc_tlbie_lock:
bne 24b
isync
- ld r5,HSTATE_KVM_VCORE(r13)
- ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
+ ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
li r0,0x18f
rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
or r0,r7,r0
@@ -693,7 +541,7 @@ fast_guest_return:
mtspr SPRN_HSRR1,r11
/* Activate guest mode, so faults get handled by KVM */
- li r9, KVM_GUEST_MODE_GUEST_HV
+ li r9, KVM_GUEST_MODE_GUEST
stb r9, HSTATE_IN_GUEST(r13)
/* Enter guest */
@@ -702,15 +550,13 @@ BEGIN_FTR_SECTION
ld r5, VCPU_CFAR(r4)
mtspr SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
-BEGIN_FTR_SECTION
- ld r0, VCPU_PPR(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r5, VCPU_LR(r4)
lwz r6, VCPU_CR(r4)
mtlr r5
mtcr r6
+ ld r0, VCPU_GPR(R0)(r4)
ld r1, VCPU_GPR(R1)(r4)
ld r2, VCPU_GPR(R2)(r4)
ld r3, VCPU_GPR(R3)(r4)
@@ -724,10 +570,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r12, VCPU_GPR(R12)(r4)
ld r13, VCPU_GPR(R13)(r4)
-BEGIN_FTR_SECTION
- mtspr SPRN_PPR, r0
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
hrfid
@@ -742,8 +584,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/*
* We come here from the first-level interrupt handlers.
*/
- .globl kvmppc_interrupt_hv
-kvmppc_interrupt_hv:
+ .globl kvmppc_interrupt
+kvmppc_interrupt:
/*
* Register contents:
* R12 = interrupt vector
@@ -753,19 +595,6 @@ kvmppc_interrupt_hv:
*/
/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
std r9, HSTATE_HOST_R2(r13)
-
- lbz r9, HSTATE_IN_GUEST(r13)
- cmpwi r9, KVM_GUEST_MODE_HOST_HV
- beq kvmppc_bad_host_intr
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
- cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_HOST_R2(r13)
- beq kvmppc_interrupt_pr
-#endif
- /* We're now back in the host but in guest MMU context */
- li r9, KVM_GUEST_MODE_HOST_HV
- stb r9, HSTATE_IN_GUEST(r13)
-
ld r9, HSTATE_KVM_VCPU(r13)
/* Save registers */
@@ -791,10 +620,6 @@ BEGIN_FTR_SECTION
ld r3, HSTATE_CFAR(r13)
std r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
-BEGIN_FTR_SECTION
- ld r4, HSTATE_PPR(r13)
- std r4, VCPU_PPR(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Restore R1/R2 so we can handle faults */
ld r1, HSTATE_HOST_R1(r13)
@@ -817,6 +642,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r3, VCPU_GPR(R13)(r9)
std r4, VCPU_LR(r9)
+ /* Unset guest mode */
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
stw r12,VCPU_TRAP(r9)
/* Save HEIR (HV emulation assist reg) in last_inst
@@ -867,11 +696,46 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* set, we know the host wants us out so let's do it now
*/
do_ext_interrupt:
- bl kvmppc_read_intr
- cmpdi r3, 0
- bgt ext_interrupt_to_host
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne ext_interrupt_to_host
+
+ /* Now read the interrupt from the ICP */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r7, XICS_XIRR
+ cmpdi r5, 0
+ beq- ext_interrupt_to_host
+ lwzcix r3, r5, r7
+ rlwinm. r0, r3, 0, 0xffffff
+ sync
+ beq 3f /* if nothing pending in the ICP */
+
+ /* We found something in the ICP...
+ *
+ * If it's not an IPI, stash it in the PACA and return to
+ * the host, we don't (yet) handle directing real external
+ * interrupts directly to the guest
+ */
+ cmpwi r0, XICS_IPI
+ bne ext_stash_for_host
+
+ /* It's an IPI, clear the MFRR and EOI it */
+ li r0, 0xff
+ li r6, XICS_MFRR
+ stbcix r0, r5, r6 /* clear the IPI */
+ stwcix r3, r5, r7 /* EOI it */
+ sync
+
+ /* We need to re-check host IPI now in case it got set in the
+ * meantime. If it's clear, we bounce the interrupt to the
+ * guest
+ */
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne- 1f
 /* Alright, looks like an IPI for the guest, we need to set MER */
+3:
/* Check if any CPU is heading out to the host, if so head out too */
ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
@@ -900,9 +764,27 @@ do_ext_interrupt:
mtspr SPRN_LPCR, r8
b fast_guest_return
+ /* We raced with the host, we need to resend that IPI, bummer */
+1: li r0, IPI_PRIORITY
+ stbcix r0, r5, r6 /* set the IPI */
+ sync
+ b ext_interrupt_to_host
+
+ext_stash_for_host:
+ /* It's not an IPI and it's for the host, stash it in the PACA
+ * before exit, it will be picked up by the host ICP driver
+ */
+ stw r3, HSTATE_SAVED_XIRR(r13)
ext_interrupt_to_host:
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
/* Save more register state */
mfdar r6
mfdsisr r7
@@ -1072,30 +954,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_SDR1,r6 /* switch to partition page table */
mtspr SPRN_LPID,r7
isync
-
- /* Subtract timebase offset from timebase */
- ld r8,VCORE_TB_OFFSET(r5)
- cmpdi r8,0
- beq 17f
- mftb r6 /* current host timebase */
- subf r8,r8,r6
- mtspr SPRN_TBU40,r8 /* update upper 40 bits */
- mftb r7 /* check if lower 24 bits overflowed */
- clrldi r6,r6,40
- clrldi r7,r7,40
- cmpld r7,r6
- bge 17f
- addis r8,r8,0x100 /* if so, increment upper 40 bits */
- mtspr SPRN_TBU40,r8
-
- /* Reset PCR */
-17: ld r0, VCORE_PCR(r5)
- cmpdi r0, 0
- beq 18f
- li r0, 0
- mtspr SPRN_PCR, r0
-18:
- /* Signal secondary CPUs to continue */
+ li r0,0
stb r0,VCORE_IN_GUEST(r5)
lis r8,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC,r8
@@ -1193,13 +1052,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1: addi r8,r8,16
.endr
- /* Save DEC */
- mfspr r5,SPRN_DEC
- mftb r6
- extsw r5,r5
- add r5,r5,r6
- std r5,VCPU_DEC_EXPIRES(r9)
-
/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
@@ -1210,10 +1062,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
- /* Unset guest mode */
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
@@ -1286,13 +1134,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
b 22f
21: mfspr r5, SPRN_MMCR1
- mfspr r7, SPRN_SIAR
- mfspr r8, SPRN_SDAR
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9)
- std r7, VCPU_SIAR(r9)
- std r8, VCPU_SDAR(r9)
mfspr r3, SPRN_PMC1
mfspr r4, SPRN_PMC2
mfspr r5, SPRN_PMC3
@@ -1314,30 +1158,103 @@ BEGIN_FTR_SECTION
stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
- ld r0, 112+PPC_LR_STKOFF(r1)
- addi r1, r1, 112
- mtlr r0
- blr
-secondary_too_late:
- ld r5,HSTATE_KVM_VCORE(r13)
- HMT_LOW
-13: lbz r3,VCORE_IN_GUEST(r5)
- cmpwi r3,0
- bne 13b
- HMT_MEDIUM
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
- ld r11,PACA_SLBSHADOWPTR(r13)
- .rept SLB_NUM_BOLTED
- ld r5,SLBSHADOW_SAVEAREA(r11)
- ld r6,SLBSHADOW_SAVEAREA+8(r11)
- andis. r7,r5,SLB_ESID_V@h
- beq 1f
- slbmte r6,r5
-1: addi r11,r11,16
- .endr
- b 22b
+ /* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
+ lwz r0,VCPU_PTID(r9)
+ cmpwi r0,0
+ bne secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
+ /* Restore SPRG3 */
+ ld r3,PACA_SPRG3(r13)
+ mtspr SPRN_SPRG3,r3
+
+ /*
+ * Reload DEC. HDEC interrupts were disabled when
+ * we reloaded the host's LPCR value.
+ */
+ ld r3, HSTATE_DECEXP(r13)
+ mftb r4
+ subf r4, r4, r3
+ mtspr SPRN_DEC, r4
+
+ /* Reload the host's PMU registers */
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r4, LPPACA_PMCINUSE(r3)
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+ lwz r3, HSTATE_PMC(r13)
+ lwz r4, HSTATE_PMC + 4(r13)
+ lwz r5, HSTATE_PMC + 8(r13)
+ lwz r6, HSTATE_PMC + 12(r13)
+ lwz r8, HSTATE_PMC + 16(r13)
+ lwz r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ lwz r10, HSTATE_PMC + 24(r13)
+ lwz r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, HSTATE_MMCR(r13)
+ ld r4, HSTATE_MMCR + 8(r13)
+ ld r5, HSTATE_MMCR + 16(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_MMCR0, r3
+ isync
+23:
+ /*
+ * For external and machine check interrupts, we need
+ * to call the Linux handler to process the interrupt.
+ * We do that by jumping to absolute address 0x500 for
+ * external interrupts, or the machine_check_fwnmi label
+ * for machine checks (since firmware might have patched
+ * the vector area at 0x200). The [h]rfid at the end of the
+ * handler will return to the book3s_hv_interrupts.S code.
+ * For other interrupts we do the rfid to get back
+ * to the book3s_hv_interrupts.S code here.
+ */
+ ld r8, HSTATE_VMHANDLER(r13)
+ ld r7, HSTATE_HOST_MSR(r13)
+
+ cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
+ beq 11f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* RFI into the highmem handler, or branch to interrupt handler */
+ mfmsr r6
+ li r0, MSR_RI
+ andc r6, r6, r0
+ mtmsrd r6, 1 /* Clear RI in MSR */
+ mtsrr0 r8
+ mtsrr1 r7
+ beqa 0x500 /* external interrupt (PPC970) */
+ beq cr1, 13f /* machine check */
+ RFI
+
+ /* On POWER7, we have external interrupts set to use HSRR0/1 */
+11: mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0x500
+
+13: b machine_check_fwnmi
/*
* Check whether an HDSI is an HPTE not found fault or something else.
@@ -1416,7 +1333,7 @@ fast_interrupt_c_return:
stw r8, VCPU_LAST_INST(r9)
/* Unset guest mode. */
- li r0, KVM_GUEST_MODE_HOST_HV
+ li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
@@ -1784,70 +1701,67 @@ machine_check_realmode:
rotldi r11, r11, 63
b fast_interrupt_c_return
-/*
- * Determine what sort of external interrupt is pending (if any).
- * Returns:
- * 0 if no interrupt is pending
- * 1 if an interrupt is pending that needs to be handled by the host
- * -1 if there was a guest wakeup IPI (which has now been cleared)
- */
-kvmppc_read_intr:
- /* see if a host IPI is pending */
- li r3, 1
- lbz r0, HSTATE_HOST_IPI(r13)
- cmpwi r0, 0
- bne 1f
-
- /* Now read the interrupt from the ICP */
- ld r6, HSTATE_XICS_PHYS(r13)
- li r7, XICS_XIRR
- cmpdi r6, 0
- beq- 1f
- lwzcix r0, r6, r7
- rlwinm. r3, r0, 0, 0xffffff
- sync
- beq 1f /* if nothing pending in the ICP */
+secondary_too_late:
+ ld r5,HSTATE_KVM_VCORE(r13)
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ ld r11,PACA_SLBSHADOWPTR(r13)
- /* We found something in the ICP...
- *
- * If it's not an IPI, stash it in the PACA and return to
- * the host, we don't (yet) handle directing real external
- * interrupts directly to the guest
- */
- cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
- li r3, 1
- bne 42f
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r11)
+ ld r6,SLBSHADOW_SAVEAREA+8(r11)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r11,r11,16
+ .endr
- /* It's an IPI, clear the MFRR and EOI it */
- li r3, 0xff
- li r8, XICS_MFRR
- stbcix r3, r6, r8 /* clear the IPI */
- stwcix r0, r6, r7 /* EOI it */
+secondary_nap:
+ /* Clear our vcpu pointer so we don't come back in early */
+ li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
+ lwsync
+ /* Clear any pending IPI - assume we're a secondary thread */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r7, XICS_XIRR
+ lwzcix r3, r5, r7 /* ack any pending interrupt */
+ rlwinm. r0, r3, 0, 0xffffff /* any pending? */
+ beq 37f
sync
+ li r0, 0xff
+ li r6, XICS_MFRR
+ stbcix r0, r5, r6 /* clear the IPI */
+ stwcix r3, r5, r7 /* EOI it */
+37: sync
- /* We need to re-check host IPI now in case it got set in the
- * meantime. If it's clear, we bounce the interrupt to the
- * guest
- */
- lbz r0, HSTATE_HOST_IPI(r13)
- cmpwi r0, 0
- bne- 43f
-
- /* OK, it's an IPI for us */
- li r3, -1
-1: blr
+ /* increment the nap count and then go to nap mode */
+ ld r4, HSTATE_KVM_VCORE(r13)
+ addi r4, r4, VCORE_NAP_COUNT
+ lwsync /* make previous updates visible */
+51: lwarx r3, 0, r4
+ addi r3, r3, 1
+ stwcx. r3, 0, r4
+ bne 51b
-42: /* It's not an IPI and it's for the host, stash it in the PACA
- * before exit, it will be picked up by the host ICP driver
- */
- stw r0, HSTATE_SAVED_XIRR(r13)
- b 1b
+kvm_no_guest:
+ li r0, KVM_HWTHREAD_IN_NAP
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
-43: /* We raced with the host, we need to resend that IPI, bummer */
- li r0, IPI_PRIORITY
- stbcix r0, r6, r8 /* set the IPI */
- sync
- b 1b
+ li r3, LPCR_PECE0
+ mfspr r4, SPRN_LPCR
+ rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
+ mtspr SPRN_LPCR, r4
+ isync
+ std r0, HSTATE_SCRATCH0(r13)
+ ptesync
+ ld r0, HSTATE_SCRATCH0(r13)
+1: cmpd r0, r0
+ bne 1b
+ nap
+ b .
/*
* Save away FP, VMX and VSX registers.
@@ -1965,11 +1879,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
lwz r7,VCPU_VRSAVE(r4)
mtspr SPRN_VRSAVE,r7
blr
-
-/*
- * We come here if we get any exception or interrupt while we are
- * executing host real mode code while in guest MMU context.
- * For now just spin, but we should do something better.
- */
-kvmppc_bad_host_intr:
- b .
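
The kvmppc_read_intr() helper deleted above is folded back into do_ext_interrupt earlier in this file. Its contract, rendered as hedged C for readability (host_ipi_pending() and the xics_*() helpers are stand-in names for the lbz/lwzcix/stbcix sequences, not kernel functions):

/* 0: nothing pending; 1: the host must handle it; -1: guest wakeup IPI,
 * already cleared and EOId. */
static int read_intr_sketch(void)
{
	u32 xirr;

	if (host_ipi_pending())			/* HSTATE_HOST_IPI set */
		return 1;
	xirr = xics_read_xirr();		/* ack from the ICP */
	if (!(xirr & 0xffffff))
		return 0;			/* nothing in the ICP */
	if ((xirr & 0xffffff) != XICS_IPI) {
		stash_xirr_for_host(xirr);	/* HSTATE_SAVED_XIRR */
		return 1;
	}
	xics_clear_ipi_and_eoi(xirr);
	if (host_ipi_pending()) {		/* raced with the host */
		xics_resend_ipi(IPI_PRIORITY);
		return 1;
	}
	return -1;				/* IPI was for the guest */
}
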
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 2d88672..01f10d4 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -26,12 +26,8 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name) GLUE(.,name)
-#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
-
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name) name
-#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
-
#endif /* CONFIG_PPC_BOOK3S_XX */
#define VCPU_LOAD_NVGPRS(vcpu) \
@@ -91,14 +87,8 @@ kvm_start_entry:
VCPU_LOAD_NVGPRS(r4)
kvm_start_lightweight:
- /* Copy registers into shadow vcpu so we can access them in real mode */
- GET_SHADOW_VCPU(r3)
- bl FUNC(kvmppc_copy_to_svcpu)
- nop
- REST_GPR(4, r1)
#ifdef CONFIG_PPC_BOOK3S_64
- /* Get the dcbz32 flag */
PPC_LL r3, VCPU_HFLAGS(r4)
rldicl r3, r3, 0, 63 /* r3 &= 1 */
stb r3, HSTATE_RESTORE_HID5(r13)
@@ -121,6 +111,9 @@ kvm_start_lightweight:
*
*/
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
/*
* Register usage at this point:
*
@@ -132,31 +125,18 @@ kvm_start_lightweight:
*
*/
- /* Transfer reg values from shadow vcpu back to vcpu struct */
- /* On 64-bit, interrupts are still off at this point */
- PPC_LL r3, GPR4(r1) /* vcpu pointer */
- GET_SHADOW_VCPU(r4)
- bl FUNC(kvmppc_copy_from_svcpu)
- nop
+ /* R7 = vcpu */
+ PPC_LL r7, GPR4(r1)
#ifdef CONFIG_PPC_BOOK3S_64
- /* Re-enable interrupts */
- ld r3, HSTATE_HOST_MSR(r13)
- ori r3, r3, MSR_EE
- MTMSR_EERI(r3)
-
/*
* Reload kernel SPRG3 value.
* No need to save guest value as usermode can't modify SPRG3.
*/
ld r3, PACA_SPRG_VDSO(r13)
mtspr SPRN_SPRG_VDSO_WRITE, r3
-
#endif /* CONFIG_PPC_BOOK3S_64 */
- /* R7 = vcpu */
- PPC_LL r7, GPR4(r1)
-
PPC_STL r14, VCPU_GPR(R14)(r7)
PPC_STL r15, VCPU_GPR(R15)(r7)
PPC_STL r16, VCPU_GPR(R16)(r7)
@@ -181,7 +161,7 @@ kvm_start_lightweight:
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
- bl FUNC(kvmppc_handle_exit_pr)
+ bl FUNC(kvmppc_handle_exit)
/* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 5a1ab12..da8b13c 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -28,7 +28,7 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
-#include "trace_pr.h"
+#include "trace.h"
#define PTE_SIZE 12
@@ -56,14 +56,6 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
HPTEG_HASH_BITS_VPTE_LONG);
}
-#ifdef CONFIG_PPC_BOOK3S_64
-static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
-{
- return hash_64((vpage & 0xffffffff0ULL) >> 4,
- HPTEG_HASH_BITS_VPTE_64K);
-}
-#endif
-
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
u64 index;
@@ -91,15 +83,6 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
hlist_add_head_rcu(&pte->list_vpte_long,
&vcpu3s->hpte_hash_vpte_long[index]);
-#ifdef CONFIG_PPC_BOOK3S_64
- /* Add to vPTE_64k list */
- index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
- hlist_add_head_rcu(&pte->list_vpte_64k,
- &vcpu3s->hpte_hash_vpte_64k[index]);
-#endif
-
- vcpu3s->hpte_cache_count++;
-
spin_unlock(&vcpu3s->mmu_lock);
}
@@ -130,13 +113,10 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
hlist_del_init_rcu(&pte->list_pte_long);
hlist_del_init_rcu(&pte->list_vpte);
hlist_del_init_rcu(&pte->list_vpte_long);
-#ifdef CONFIG_PPC_BOOK3S_64
- hlist_del_init_rcu(&pte->list_vpte_64k);
-#endif
- vcpu3s->hpte_cache_count--;
spin_unlock(&vcpu3s->mmu_lock);
+ vcpu3s->hpte_cache_count--;
call_rcu(&pte->rcu_head, free_pte_rcu);
}
@@ -239,29 +219,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
rcu_read_unlock();
}
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Flush with mask 0xffffffff0 */
-static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- struct hlist_head *list;
- struct hpte_cache *pte;
- u64 vp_mask = 0xffffffff0ULL;
-
- list = &vcpu3s->hpte_hash_vpte_64k[
- kvmppc_mmu_hash_vpte_64k(guest_vp)];
-
- rcu_read_lock();
-
- /* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
- if ((pte->pte.vpage & vp_mask) == guest_vp)
- invalidate_pte(vcpu, pte);
-
- rcu_read_unlock();
-}
-#endif
-
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
@@ -292,11 +249,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
case 0xfffffffffULL:
kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
break;
-#ifdef CONFIG_PPC_BOOK3S_64
- case 0xffffffff0ULL:
- kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
- break;
-#endif
case 0xffffff000ULL:
kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
break;
@@ -333,19 +285,15 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
+ pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
+ vcpu3s->hpte_cache_count++;
+
if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
kvmppc_mmu_pte_flush_all(vcpu);
- pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
-
return pte;
}
-void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
-{
- kmem_cache_free(hpte_cache, pte);
-}
-
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_pte_flush(vcpu, 0, 0);
@@ -372,10 +320,6 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
-#ifdef CONFIG_PPC_BOOK3S_64
- kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
- ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
-#endif
spin_lock_init(&vcpu3s->mmu_lock);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 93cf4c1..6846ebc 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,12 +40,8 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
-#include <linux/module.h>
-#include "book3s.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace_pr.h"
+#include "trace.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */
@@ -60,25 +56,29 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#define HW_PAGE_SIZE PAGE_SIZE
#endif
-static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
+ memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
svcpu_put(svcpu);
#endif
vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
- current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
+ current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}
-static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
+ memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
svcpu_put(svcpu);
#endif
@@ -87,61 +87,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
-/* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
- struct kvm_vcpu *vcpu)
-{
- svcpu->gpr[0] = vcpu->arch.gpr[0];
- svcpu->gpr[1] = vcpu->arch.gpr[1];
- svcpu->gpr[2] = vcpu->arch.gpr[2];
- svcpu->gpr[3] = vcpu->arch.gpr[3];
- svcpu->gpr[4] = vcpu->arch.gpr[4];
- svcpu->gpr[5] = vcpu->arch.gpr[5];
- svcpu->gpr[6] = vcpu->arch.gpr[6];
- svcpu->gpr[7] = vcpu->arch.gpr[7];
- svcpu->gpr[8] = vcpu->arch.gpr[8];
- svcpu->gpr[9] = vcpu->arch.gpr[9];
- svcpu->gpr[10] = vcpu->arch.gpr[10];
- svcpu->gpr[11] = vcpu->arch.gpr[11];
- svcpu->gpr[12] = vcpu->arch.gpr[12];
- svcpu->gpr[13] = vcpu->arch.gpr[13];
- svcpu->cr = vcpu->arch.cr;
- svcpu->xer = vcpu->arch.xer;
- svcpu->ctr = vcpu->arch.ctr;
- svcpu->lr = vcpu->arch.lr;
- svcpu->pc = vcpu->arch.pc;
-}
-
-/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
- struct kvmppc_book3s_shadow_vcpu *svcpu)
-{
- vcpu->arch.gpr[0] = svcpu->gpr[0];
- vcpu->arch.gpr[1] = svcpu->gpr[1];
- vcpu->arch.gpr[2] = svcpu->gpr[2];
- vcpu->arch.gpr[3] = svcpu->gpr[3];
- vcpu->arch.gpr[4] = svcpu->gpr[4];
- vcpu->arch.gpr[5] = svcpu->gpr[5];
- vcpu->arch.gpr[6] = svcpu->gpr[6];
- vcpu->arch.gpr[7] = svcpu->gpr[7];
- vcpu->arch.gpr[8] = svcpu->gpr[8];
- vcpu->arch.gpr[9] = svcpu->gpr[9];
- vcpu->arch.gpr[10] = svcpu->gpr[10];
- vcpu->arch.gpr[11] = svcpu->gpr[11];
- vcpu->arch.gpr[12] = svcpu->gpr[12];
- vcpu->arch.gpr[13] = svcpu->gpr[13];
- vcpu->arch.cr = svcpu->cr;
- vcpu->arch.xer = svcpu->xer;
- vcpu->arch.ctr = svcpu->ctr;
- vcpu->arch.lr = svcpu->lr;
- vcpu->arch.pc = svcpu->pc;
- vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
- vcpu->arch.fault_dar = svcpu->fault_dar;
- vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
- vcpu->arch.last_inst = svcpu->last_inst;
-}
-
-static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
int r = 1; /* Indicate we want to get back into the guest */
@@ -154,69 +100,44 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
}
/************* MMU Notifiers *************/
-static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- long i;
- struct kvm_vcpu *vcpu;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn, gfn+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
- gfn_end << PAGE_SHIFT);
- }
-}
-static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
trace_kvm_unmap_hva(hva);
- do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
+ /*
+ * Flush all shadow tlb entries everywhere. This is slow, but
+ * we are 100% sure that we catch the page to be unmapped
+ */
+ kvm_flush_remote_tlbs(kvm);
return 0;
}
-static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
- unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
- do_kvm_unmap_hva(kvm, start, end);
+ /* kvm_unmap_hva flushes everything anyway */
+ kvm_unmap_hva(kvm, start);
return 0;
}
-static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
/* XXX could be more clever ;) */
return 0;
}
-static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
/* XXX could be more clever ;) */
return 0;
}
-static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
/* The page will get remapped properly on its next fault */
- do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
+ kvm_unmap_hva(kvm, hva);
}
/*****************************************/
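
The do_kvm_unmap_hva() deleted above flushed only the guest-physical range backing [start, end); after the revert every unmap falls back to a full kvm_flush_remote_tlbs(). The hva-to-gfn step it relied on is plain arithmetic; a sketch matching hva_to_gfn_memslot() from include/linux/kvm_host.h:

static gfn_t hva_to_gfn_sketch(struct kvm_memory_slot *slot,
			       unsigned long hva)
{
	return slot->base_gfn +
	       ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}
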
@@ -238,7 +159,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
vcpu->arch.shadow_msr = smsr;
}
-static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
ulong old_msr = vcpu->arch.shared->msr;
@@ -298,7 +219,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
-void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
u32 host_pvr;
@@ -335,23 +256,6 @@ void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
- /*
- * If they're asking for POWER6 or later, set the flag
- * indicating that we can do multiple large page sizes
- * and 1TB segments.
- * Also set the flag that indicates that tlbie has the large
- * page bit in the RB operand instead of the instruction.
- */
- switch (PVR_VER(pvr)) {
- case PVR_POWER6:
- case PVR_POWER7:
- case PVR_POWER7p:
- case PVR_POWER8:
- vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
- BOOK3S_HFLAG_NEW_TLBIE;
- break;
- }
-
#ifdef CONFIG_PPC_BOOK3S_32
/* 32 bit Book3S always has 32 byte dcbz */
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
@@ -430,7 +334,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
ulong eaddr, int vec)
{
bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
- bool iswrite = false;
int r = RESUME_GUEST;
int relocated;
int page_found = 0;
@@ -441,12 +344,10 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 vsid;
relocated = data ? dr : ir;
- if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
- iswrite = true;
/* Resolve real address if translation turned on */
if (relocated) {
- page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
+ page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
} else {
pte.may_execute = true;
pte.may_read = true;
@@ -454,7 +355,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.raddr = eaddr & KVM_PAM;
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
- pte.page_size = MMU_PAGE_64K;
}
switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -488,18 +388,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
vcpu->arch.shared->msr |=
- vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+ (svcpu->shadow_srr1 & 0x00000000f8000000ULL);
+ svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->msr |=
- vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+ svcpu->shadow_srr1 & 0x00000000f8000000ULL;
+ svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
@@ -507,20 +411,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
} else if (!is_mmio &&
kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
- if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
- /*
- * There is already a host HPTE there, presumably
- * a read-only one for a page the guest thinks
- * is writable, so get rid of it first.
- */
- kvmppc_mmu_unmap_page(vcpu, &pte);
- }
/* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte, iswrite);
+ kvmppc_mmu_map_page(vcpu, &pte);
if (data)
vcpu->stat.sp_storage++;
else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
kvmppc_patch_dcbz(vcpu, &pte);
} else {
/* MMIO */
@@ -548,7 +444,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
- u64 *thread_fpr = &t->fp_state.fpr[0][0];
+ u64 *thread_fpr = (u64*)t->fpr;
int i;
/*
@@ -570,14 +466,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
/*
* Note that on CPUs with VSX, giveup_fpu stores
* both the traditional FP registers and the added VSX
- * registers into thread.fp_state.fpr[].
+ * registers into thread.fpr[].
*/
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
- vcpu->arch.fpscr = t->fp_state.fpscr;
+ vcpu->arch.fpscr = t->fpscr.val;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
@@ -590,8 +486,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
if (msr & MSR_VEC) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vr_state.vscr;
+ memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vscr;
}
#endif
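
The (u64 *)t->fpr cast restored in this hunk views the pre-3.13 FP save area as a flat doubleword array, indexed through get_fpr_index(): under VSX each FP register sits in the first doubleword of a 16-byte VSR slot, so the flat index doubles. A sketch matching the 3.12 helper in this file:

static int get_fpr_index_sketch(int i)
{
#ifdef CONFIG_VSX
	i *= 2;		/* FP reg i = doubleword 2*i of the VSR array */
#endif
	return i;
}
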
@@ -643,7 +539,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
- u64 *thread_fpr = &t->fp_state.fpr[0][0];
+ u64 *thread_fpr = (u64*)t->fpr;
int i;
/* When we have paired singles, we emulate in software */
@@ -688,15 +584,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
- t->fp_state.fpscr = vcpu->arch.fpscr;
+ t->fpscr.val = vcpu->arch.fpscr;
t->fpexc_mode = 0;
kvmppc_load_up_fpu();
}
if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
- memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vr_state.vscr = vcpu->arch.vscr;
+ memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vscr = vcpu->arch.vscr;
t->vrsave = -1;
kvmppc_load_up_altivec();
#endif
@@ -723,15 +619,13 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
if (lost_ext & MSR_FP)
kvmppc_load_up_fpu();
-#ifdef CONFIG_ALTIVEC
if (lost_ext & MSR_VEC)
kvmppc_load_up_altivec();
-#endif
current->thread.regs->msr |= lost_ext;
}
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
{
int r = RESUME_HOST;
int s;
@@ -749,32 +643,25 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
{
- ulong shadow_srr1 = vcpu->arch.shadow_srr1;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong shadow_srr1 = svcpu->shadow_srr1;
vcpu->stat.pf_instruc++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
- {
- struct kvmppc_book3s_shadow_vcpu *svcpu;
- u32 sr;
-
- svcpu = svcpu_get(vcpu);
- sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
+ if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
svcpu_put(svcpu);
- if (sr == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
- r = RESUME_GUEST;
- break;
- }
+ break;
}
#endif
+ svcpu_put(svcpu);
/* only care about PTEG not found errors, but leave NX alone */
if (shadow_srr1 & 0x40000000) {
- int idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -795,36 +682,25 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_DATA_STORAGE:
{
ulong dar = kvmppc_get_fault_dar(vcpu);
- u32 fault_dsisr = vcpu->arch.fault_dsisr;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 fault_dsisr = svcpu->fault_dsisr;
vcpu->stat.pf_storage++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
- {
- struct kvmppc_book3s_shadow_vcpu *svcpu;
- u32 sr;
-
- svcpu = svcpu_get(vcpu);
- sr = svcpu->sr[dar >> SID_SHIFT];
+ if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
svcpu_put(svcpu);
- if (sr == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, dar);
- r = RESUME_GUEST;
- break;
- }
+ break;
}
#endif
+ svcpu_put(svcpu);
- /*
- * We need to handle missing shadow PTEs, and
- * protection faults due to us mapping a page read-only
- * when the guest thinks it is writable.
- */
- if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
- int idx = srcu_read_lock(&vcpu->kvm->srcu);
+ /* The only case we need to handle is missing shadow PTEs */
+ if (fault_dsisr & DSISR_NOHPTE) {
r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
} else {
vcpu->arch.shared->dar = dar;
vcpu->arch.shared->dsisr = fault_dsisr;
@@ -867,10 +743,13 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
{
enum emulation_result er;
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
ulong flags;
program_interrupt:
- flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+ svcpu = svcpu_get(vcpu);
+ flags = svcpu->shadow_srr1 & 0x1f0000ull;
+ svcpu_put(svcpu);
if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
@@ -919,7 +798,7 @@ program_interrupt:
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_64_PR
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
r = RESUME_GUEST;
break;
@@ -1002,7 +881,9 @@ program_interrupt:
break;
default:
{
- ulong shadow_srr1 = vcpu->arch.shadow_srr1;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong shadow_srr1 = svcpu->shadow_srr1;
+ svcpu_put(svcpu);
/* Ugh - bork here! What did we get? */
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -1037,8 +918,8 @@ program_interrupt:
return r;
}
-static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;
@@ -1064,13 +945,13 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
return 0;
}
-static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;
- kvmppc_set_pvr_pr(vcpu, sregs->pvr);
+ kvmppc_set_pvr(vcpu, sregs->pvr);
vcpu3s->sdr1 = sregs->u.s.sdr1;
if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -1100,8 +981,7 @@ static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
return 0;
}
-static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
int r = 0;
@@ -1130,8 +1010,7 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
return r;
}
-static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
int r = 0;
@@ -1161,30 +1040,28 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
return r;
}
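
These handlers back the generic ONE_REG interface, where userspace addresses a single register by id and passes a pointer for the value. A hedged userspace sketch (vcpu_fd is assumed to be an initialized KVM vcpu descriptor set up elsewhere):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_one_reg(int vcpu_fd, uint64_t id, uint64_t *out)
{
        struct kvm_one_reg reg = {
                .id   = id,
                .addr = (uintptr_t)out,   /* kernel copies the value here */
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
                perror("KVM_GET_ONE_REG");
                return -1;
        }
        return 0;
}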
-static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
- unsigned int id)
+int kvmppc_core_check_processor_compat(void)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_book3s *vcpu_book3s;
struct kvm_vcpu *vcpu;
int err = -ENOMEM;
unsigned long p;
- vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- if (!vcpu)
- goto out;
-
vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
if (!vcpu_book3s)
- goto free_vcpu;
- vcpu->arch.book3s = vcpu_book3s;
+ goto out;
-#ifdef CONFIG_KVM_BOOK3S_32
- vcpu->arch.shadow_vcpu =
- kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
- if (!vcpu->arch.shadow_vcpu)
- goto free_vcpu3s;
-#endif
+ vcpu_book3s->shadow_vcpu =
+ kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+ if (!vcpu_book3s->shadow_vcpu)
+ goto free_vcpu;
+ vcpu = &vcpu_book3s->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_shadow_vcpu;
@@ -1197,19 +1074,13 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
- /*
- * Default to the same as the host if we're on sufficiently
- * recent machine that we have 1TB segments;
- * otherwise default to PPC970FX.
- */
+ /* default to book3s_64 (970fx) */
vcpu->arch.pvr = 0x3C0301;
- if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
- vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
/* default to book3s_32 (750) */
vcpu->arch.pvr = 0x84202;
#endif
- kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
vcpu->arch.slb_nr = 64;
vcpu->arch.shadow_msr = MSR_USER64;
@@ -1223,37 +1094,32 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
- kfree(vcpu->arch.shadow_vcpu);
-free_vcpu3s:
-#endif
- vfree(vcpu_book3s);
+ kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
- kmem_cache_free(kvm_vcpu_cache, vcpu);
+ vfree(vcpu_book3s);
out:
return ERR_PTR(err);
}
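
The constructor above uses the usual goto-unwind pattern: one label per allocation, and a failure jumps to the cleanup of everything acquired so far. A generic standalone sketch of the same shape:

#include <stdlib.h>

struct widget { void *a; void *b; };

struct widget *widget_create(void)
{
        struct widget *w;

        w = calloc(1, sizeof(*w));
        if (!w)
                goto out;

        w->a = malloc(64);
        if (!w->a)
                goto free_widget;

        w->b = malloc(64);
        if (!w->b)
                goto free_a;

        return w;

free_a:
        free(w->a);
free_widget:
        free(w);
out:
        return NULL;
}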
-static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
- kfree(vcpu->arch.shadow_vcpu);
-#endif
+ kfree(vcpu_book3s->shadow_vcpu);
vfree(vcpu_book3s);
- kmem_cache_free(kvm_vcpu_cache, vcpu);
}
-static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
- struct thread_fp_state fp;
+ double fpr[32][TS_FPRWIDTH];
+ unsigned int fpscr;
int fpexc_mode;
#ifdef CONFIG_ALTIVEC
- struct thread_vr_state vr;
+ vector128 vr[32];
+ vector128 vscr;
unsigned long uninitialized_var(vrsave);
int used_vr;
#endif
@@ -1282,7 +1148,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Save FPU state in stack */
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
- fp = current->thread.fp_state;
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
fpexc_mode = current->thread.fpexc_mode;
#ifdef CONFIG_ALTIVEC
@@ -1291,7 +1158,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (used_vr) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- vr = current->thread.vr_state;
+ memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+ vscr = current->thread.vscr;
vrsave = current->thread.vrsave;
}
#endif
@@ -1323,13 +1191,15 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.regs->msr = ext_msr;
/* Restore FPU/VSX state from stack */
- current->thread.fp_state = fp;
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
current->thread.fpexc_mode = fpexc_mode;
#ifdef CONFIG_ALTIVEC
/* Restore Altivec state from stack */
if (used_vr && current->thread.used_vr) {
- current->thread.vr_state = vr;
+ memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+ current->thread.vscr = vscr;
current->thread.vrsave = vrsave;
}
current->thread.used_vr = used_vr;
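
The run loop brackets guest execution with a save/restore of the host's FP context on the stack. A standalone sketch of that bracket, mirroring the pre-3.13 thread_struct fields; run_guest() is a stand-in for the real entry path:

#include <string.h>

struct thread_state {
        double fpr[32][2];
        unsigned int fpscr;
        int fpexc_mode;
};

static struct thread_state current_thread;

static void run_guest(void) { /* guest clobbers the FP unit here */ }

void run_with_fp_bracket(void)
{
        double fpr[32][2];
        unsigned int fpscr;
        int fpexc_mode;

        /* Save host FPU state in the stack frame. */
        memcpy(fpr, current_thread.fpr, sizeof(current_thread.fpr));
        fpscr = current_thread.fpscr;
        fpexc_mode = current_thread.fpexc_mode;

        run_guest();

        /* Restore host FPU state from the stack frame. */
        memcpy(current_thread.fpr, fpr, sizeof(current_thread.fpr));
        current_thread.fpscr = fpscr;
        current_thread.fpexc_mode = fpexc_mode;
}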
@@ -1347,8 +1217,8 @@ out:
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
- struct kvm_dirty_log *log)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
{
struct kvm_memory_slot *memslot;
struct kvm_vcpu *vcpu;
@@ -1383,100 +1253,67 @@ out:
return r;
}
-static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
- return;
-}
+ info->flags = KVM_PPC_1T_SEGMENTS;
+
+ /* SLB is always 64 entries */
+ info->slb_size = 64;
+
+ /* Standard 4k base page size segment */
+ info->sps[0].page_shift = 12;
+ info->sps[0].slb_enc = 0;
+ info->sps[0].enc[0].page_shift = 12;
+ info->sps[0].enc[0].pte_enc = 0;
+
+ /* Standard 16M large page size segment */
+ info->sps[1].page_shift = 24;
+ info->sps[1].slb_enc = SLB_VSID_L;
+ info->sps[1].enc[0].page_shift = 24;
+ info->sps[1].enc[0].pte_enc = 0;
-static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem)
-{
return 0;
}
+#endif /* CONFIG_PPC64 */
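
This kvm_vm_ioctl_get_smmu_info() reports exactly two segment page sizes. A standalone model of those two sps[] entries; field names follow struct kvm_ppc_smmu_info, and the SLB_VSID_L value here is illustrative only:

#include <stdio.h>

#define SLB_VSID_L (1UL << 8)   /* assumption: illustrative value */

struct sps_entry {
        unsigned int page_shift;   /* log2 of the segment base page size */
        unsigned long slb_enc;     /* encoding placed in the SLB entry  */
};

int main(void)
{
        struct sps_entry sps[2] = {
                { .page_shift = 12, .slb_enc = 0 },          /* 4k  */
                { .page_shift = 24, .slb_enc = SLB_VSID_L }, /* 16M */
        };

        for (int i = 0; i < 2; i++)
                printf("segment %d: %lu-byte pages, slb_enc=0x%lx\n",
                       i, 1UL << sps[i].page_shift, sps[i].slb_enc);
        return 0;
}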
-static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old)
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
{
- return;
}
-static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
- return;
+ return 0;
}
-static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
- unsigned long npages)
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem)
{
return 0;
}
-
-#ifdef CONFIG_PPC64
-static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
- struct kvm_ppc_smmu_info *info)
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old)
{
- long int i;
- struct kvm_vcpu *vcpu;
-
- info->flags = 0;
-
- /* SLB is always 64 entries */
- info->slb_size = 64;
-
- /* Standard 4k base page size segment */
- info->sps[0].page_shift = 12;
- info->sps[0].slb_enc = 0;
- info->sps[0].enc[0].page_shift = 12;
- info->sps[0].enc[0].pte_enc = 0;
-
- /*
- * 64k large page size.
- * We only want to put this in if the CPUs we're emulating
- * support it, but unfortunately we don't have a vcpu easily
- * to hand here to test. Just pick the first vcpu, and if
- * that doesn't exist yet, report the minimum capability,
- * i.e., no 64k pages.
- * 1T segment support goes along with 64k pages.
- */
- i = 1;
- vcpu = kvm_get_vcpu(kvm, 0);
- if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
- info->flags = KVM_PPC_1T_SEGMENTS;
- info->sps[i].page_shift = 16;
- info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
- info->sps[i].enc[0].page_shift = 16;
- info->sps[i].enc[0].pte_enc = 1;
- ++i;
- }
-
- /* Standard 16M large page size segment */
- info->sps[i].page_shift = 24;
- info->sps[i].slb_enc = SLB_VSID_L;
- info->sps[i].enc[0].page_shift = 24;
- info->sps[i].enc[0].pte_enc = 0;
-
- return 0;
}
-#else
-static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
- struct kvm_ppc_smmu_info *info)
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- /* We should not get called */
- BUG();
}
-#endif /* CONFIG_PPC64 */
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);
-static int kvmppc_core_init_vm_pr(struct kvm *kvm)
+int kvmppc_core_init_vm(struct kvm *kvm)
{
- mutex_init(&kvm->arch.hpt_mutex);
+#ifdef CONFIG_PPC64
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+#endif
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
@@ -1487,7 +1324,7 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
return 0;
}
-static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
+void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1502,81 +1339,26 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
}
}
-static int kvmppc_core_check_processor_compat_pr(void)
-{
- /* we are always compatible */
- return 0;
-}
-
-static long kvm_arch_vm_ioctl_pr(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static struct kvmppc_ops kvm_ops_pr = {
- .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
- .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
- .get_one_reg = kvmppc_get_one_reg_pr,
- .set_one_reg = kvmppc_set_one_reg_pr,
- .vcpu_load = kvmppc_core_vcpu_load_pr,
- .vcpu_put = kvmppc_core_vcpu_put_pr,
- .set_msr = kvmppc_set_msr_pr,
- .vcpu_run = kvmppc_vcpu_run_pr,
- .vcpu_create = kvmppc_core_vcpu_create_pr,
- .vcpu_free = kvmppc_core_vcpu_free_pr,
- .check_requests = kvmppc_core_check_requests_pr,
- .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
- .flush_memslot = kvmppc_core_flush_memslot_pr,
- .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
- .commit_memory_region = kvmppc_core_commit_memory_region_pr,
- .unmap_hva = kvm_unmap_hva_pr,
- .unmap_hva_range = kvm_unmap_hva_range_pr,
- .age_hva = kvm_age_hva_pr,
- .test_age_hva = kvm_test_age_hva_pr,
- .set_spte_hva = kvm_set_spte_hva_pr,
- .mmu_destroy = kvmppc_mmu_destroy_pr,
- .free_memslot = kvmppc_core_free_memslot_pr,
- .create_memslot = kvmppc_core_create_memslot_pr,
- .init_vm = kvmppc_core_init_vm_pr,
- .destroy_vm = kvmppc_core_destroy_vm_pr,
- .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
- .emulate_op = kvmppc_core_emulate_op_pr,
- .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
- .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
- .fast_vcpu_kick = kvm_vcpu_kick,
- .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
-};
-
-
-int kvmppc_book3s_init_pr(void)
+static int kvmppc_book3s_init(void)
{
int r;
- r = kvmppc_core_check_processor_compat_pr();
- if (r < 0)
- return r;
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+ THIS_MODULE);
- kvm_ops_pr.owner = THIS_MODULE;
- kvmppc_pr_ops = &kvm_ops_pr;
+ if (r)
+ return r;
r = kvmppc_mmu_hpte_sysinit();
+
return r;
}
-void kvmppc_book3s_exit_pr(void)
+static void kvmppc_book3s_exit(void)
{
- kvmppc_pr_ops = NULL;
kvmppc_mmu_hpte_sysexit();
+ kvm_exit();
}
-/*
- * We only support separate modules for book3s 64
- */
-#ifdef CONFIG_PPC_BOOK3S_64
-
-module_init(kvmppc_book3s_init_pr);
-module_exit(kvmppc_book3s_exit_pr);
-
-MODULE_LICENSE("GPL");
-#endif
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
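
For contrast, a standalone sketch of the kvmppc_ops indirection this revert removes: the 3.13 code reached core operations through a table of function pointers so the PR and HV backends could coexist, while the reverted code links exactly one backend and calls it directly:

#include <stdio.h>

struct backend_ops {
        const char *name;
        int (*init_vm)(void);
};

static int pr_init_vm(void) { return 0; }

static const struct backend_ops pr_ops = {
        .name    = "PR",
        .init_vm = pr_init_vm,
};

/* With the ops table, the caller never names the backend directly. */
static const struct backend_ops *active_ops = &pr_ops;

int main(void)
{
        printf("init_vm via %s backend -> %d\n",
               active_ops->name, active_ops->init_vm());
        return 0;
}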
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 5efa97b..da0e0bc 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -21,8 +21,6 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
-#define HPTE_SIZE 16 /* bytes per HPT entry */
-
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -42,41 +40,32 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
long pte_index = kvmppc_get_gpr(vcpu, 5);
unsigned long pteg[2 * 8];
unsigned long pteg_addr, i, *hpte;
- long int ret;
- i = pte_index & 7;
pte_index &= ~7UL;
pteg_addr = get_pteg_addr(vcpu, pte_index);
- mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
hpte = pteg;
- ret = H_PTEG_FULL;
if (likely((flags & H_EXACT) == 0)) {
+ pte_index &= ~7UL;
for (i = 0; ; ++i) {
if (i == 8)
- goto done;
+ return H_PTEG_FULL;
if ((*hpte & HPTE_V_VALID) == 0)
break;
hpte += 2;
}
} else {
+ i = kvmppc_get_gpr(vcpu, 5) & 7UL;
hpte += i * 2;
- if (*hpte & HPTE_V_VALID)
- goto done;
}
hpte[0] = kvmppc_get_gpr(vcpu, 6);
hpte[1] = kvmppc_get_gpr(vcpu, 7);
- pteg_addr += i * HPTE_SIZE;
- copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+ copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
kvmppc_set_gpr(vcpu, 4, pte_index | i);
- ret = H_SUCCESS;
-
- done:
- mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
- kvmppc_set_gpr(vcpu, 3, ret);
return EMULATE_DONE;
}
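
The H_ENTER path above searches a PTEG of eight two-word HPTEs for an invalid slot unless H_EXACT pins the slot. A standalone model of the slot choice, following the stricter 3.13 variant that also rejects an occupied slot under H_EXACT; the H_EXACT bit value below is illustrative, not the PAPR encoding:

#include <stdio.h>

#define HPTE_V_VALID 1UL
#define H_EXACT      (1UL << 2)   /* assumption: flag bit for the sketch */

/* Returns the chosen slot, or -1 when the request cannot be satisfied. */
int pick_slot(unsigned long pteg[16], unsigned long flags, int wanted)
{
        int i;

        if (flags & H_EXACT)
                return (pteg[wanted * 2] & HPTE_V_VALID) ? -1 : wanted;

        for (i = 0; i < 8; i++)
                if ((pteg[i * 2] & HPTE_V_VALID) == 0)
                        return i;
        return -1;   /* H_PTEG_FULL */
}

int main(void)
{
        unsigned long pteg[16] = { HPTE_V_VALID, 0, HPTE_V_VALID, 0 };
        printf("free slot: %d\n", pick_slot(pteg, 0, 0));   /* -> 2 */
        return 0;
}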
@@ -88,31 +77,26 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
unsigned long v = 0, pteg, rb;
unsigned long pte[2];
- long int ret;
pteg = get_pteg_addr(vcpu, pte_index);
- mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
- ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
- ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
- goto done;
+ ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
+ kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
+ return EMULATE_DONE;
+ }
copy_to_user((void __user *)pteg, &v, sizeof(v));
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
- ret = H_SUCCESS;
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
kvmppc_set_gpr(vcpu, 4, pte[0]);
kvmppc_set_gpr(vcpu, 5, pte[1]);
- done:
- mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
- kvmppc_set_gpr(vcpu, 3, ret);
-
return EMULATE_DONE;
}
@@ -140,7 +124,6 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
int paramnr = 4;
int ret = H_SUCCESS;
- mutex_lock(&vcpu->kvm->arch.hpt_mutex);
for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
@@ -189,7 +172,6 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
}
- mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvmppc_set_gpr(vcpu, 3, ret);
return EMULATE_DONE;
@@ -202,16 +184,15 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
unsigned long rb, pteg, r, v;
unsigned long pte[2];
- long int ret;
pteg = get_pteg_addr(vcpu, pte_index);
- mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
- ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
- ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
- goto done;
+ ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
+ kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
+ return EMULATE_DONE;
+ }
v = pte[0];
r = pte[1];
@@ -226,11 +207,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
rb = compute_tlbie_rb(v, r, pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
copy_to_user((void __user *)pteg, pte, sizeof(pte));
- ret = H_SUCCESS;
- done:
- mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
- kvmppc_set_gpr(vcpu, 3, ret);
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
return EMULATE_DONE;
}
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9..8f7633e 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -38,6 +38,32 @@
#define FUNC(name) GLUE(.,name)
+ .globl kvmppc_skip_interrupt
+kvmppc_skip_interrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_SRR0
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+ .globl kvmppc_skip_Hinterrupt
+kvmppc_skip_Hinterrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_HSRR0
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name) name
@@ -153,15 +179,11 @@ _GLOBAL(kvmppc_entry_trampoline)
li r6, MSR_IR | MSR_DR
andc r6, r5, r6 /* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
/*
* Set EE in HOST_MSR so that it's enabled when we get into our
- * C exit handler function. On 64-bit we delay enabling
- * interrupts until we have finished transferring stuff
- * to or from the PACA.
+ * C exit handler function
*/
ori r5, r5, MSR_EE
-#endif
mtsrr0 r7
mtsrr1 r6
RFI
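
The kvmppc_skip_interrupt/kvmppc_skip_Hinterrupt stubs added above implement the same idea: swallow the interrupt by advancing the saved return address past the 4-byte instruction that trapped, then return with rfid/hrfid. A C model of that one step:

/* f->srr0 models SRR0/HSRR0, the saved address of the interrupted
 * instruction; advancing it by one instruction resumes just past it. */
struct intr_frame {
        unsigned long srr0;
};

static void skip_interrupt(struct intr_frame *f)
{
        f->srr0 += 4;   /* every PowerPC instruction is 4 bytes */
}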
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index cf95cde..3219ba8 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -260,7 +260,6 @@ fail:
*/
return rc;
}
-EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);
void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index bc50c97..1abe478 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
-.global kvmppc_interrupt_pr
-kvmppc_interrupt_pr:
+.global kvmppc_interrupt
+kvmppc_interrupt:
/* Register usage at this point:
*
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 02a17dc..a3a5cb8 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
}
/* Check for real mode returning too hard */
- if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
+ if (xics->real_mode)
return kvmppc_xics_rm_complete(vcpu, req);
switch (req) {
@@ -840,7 +840,6 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return rc;
}
-EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */
@@ -1251,13 +1250,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
xics_debugfs_init(xics);
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_64_HV
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/* Enable real mode support */
xics->real_mode = ENABLE_REALMODE;
xics->real_mode_dbg = DEBUG_REALMODE;
}
-#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
return 0;
}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7e62965..6ec3920 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -40,9 +40,7 @@
#include "timing.h"
#include "booke.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace_booke.h"
+#include "trace.h"
unsigned long kvmppc_booke_handlers;
@@ -683,8 +681,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
int ret, s;
struct thread_struct thread;
#ifdef CONFIG_PPC_FPU
- struct thread_fp_state fp;
+ unsigned int fpscr;
int fpexc_mode;
+ u64 fpr[32];
#endif
#ifdef CONFIG_ALTIVEC
@@ -707,13 +706,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */
enable_kernel_fp();
- fp = current->thread.fp_state;
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
fpexc_mode = current->thread.fpexc_mode;
/* Restore guest FPU state to thread */
- memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
- sizeof(vcpu->arch.fpr));
- current->thread.fp_state.fpscr = vcpu->arch.fpscr;
+ memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
+ current->thread.fpscr.val = vcpu->arch.fpscr;
/*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -769,12 +768,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->fpu_active = 0;
/* Save guest FPU state from thread */
- memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
- sizeof(vcpu->arch.fpr));
- vcpu->arch.fpscr = current->thread.fp_state.fpscr;
+ memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
+ vcpu->arch.fpscr = current->thread.fpscr.val;
/* Restore userspace FPU state from stack */
- current->thread.fp_state = fp;
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
current->thread.fpexc_mode = fpexc_mode;
#endif
@@ -1465,7 +1464,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
return 0;
}
-int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
sregs->u.e.features |= KVM_SREGS_E_IVOR;
@@ -1485,7 +1484,6 @@ int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
- return 0;
}
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -1520,7 +1518,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
get_sregs_base(vcpu, sregs);
get_sregs_arch206(vcpu, sregs);
- return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+ kvmppc_core_get_sregs(vcpu, sregs);
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1539,7 +1538,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
if (ret < 0)
return ret;
- return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+ return kvmppc_core_set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1592,11 +1591,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
case KVM_REG_PPC_DEBUG_INST:
val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
break;
- case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
- break;
default:
- r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
+ r = kvmppc_get_one_reg(vcpu, reg->id, &val);
break;
}
@@ -1675,11 +1671,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
kvmppc_set_tcr(vcpu, tcr);
break;
}
- case KVM_REG_PPC_VRSAVE:
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
- break;
default:
- r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
+ r = kvmppc_set_one_reg(vcpu, reg->id, &val);
break;
}
@@ -1710,12 +1703,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
unsigned long npages)
{
return 0;
@@ -1957,41 +1950,6 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
kvmppc_clear_dbsr();
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
-int kvmppc_core_init_vm(struct kvm *kvm)
-{
- return kvm->arch.kvm_ops->init_vm(kvm);
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- return kvm->arch.kvm_ops->vcpu_create(kvm, id);
-}
-
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
-}
-
-void kvmppc_core_destroy_vm(struct kvm *kvm)
-{
- kvm->arch.kvm_ops->destroy_vm(kvm);
-}
-
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
-}
-
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
-}
-
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 4a7398f..8b0861c 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -101,30 +101,6 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
-extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
-extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
- ulong spr_val);
-extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
- ulong *spr_val);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
-extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
- ulong spr_val);
-extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
- ulong *spr_val);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
-extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
- ulong spr_val);
-extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
- ulong *spr_val);
-
/*
* Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 7dd11dd..47262bf 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
-static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);
@@ -313,7 +313,7 @@ static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
-static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
if (vcpu->arch.shadow_msr & MSR_SPE)
@@ -367,8 +367,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -391,11 +390,9 @@ static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
kvmppc_get_sregs_ivor(vcpu, sregs);
kvmppc_get_sregs_e500_tlb(vcpu, sregs);
- return 0;
}
-static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int ret;
@@ -430,15 +427,15 @@ static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
-static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}
-static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
@@ -448,8 +445,8 @@ void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr)
{
}
-static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
- unsigned int id)
+struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;
@@ -491,7 +488,7 @@ out:
return ERR_PTR(err);
}
-static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -502,32 +499,15 @@ static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
-static int kvmppc_core_init_vm_e500(struct kvm *kvm)
+int kvmppc_core_init_vm(struct kvm *kvm)
{
return 0;
}
-static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
+void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}
-static struct kvmppc_ops kvm_ops_e500 = {
- .get_sregs = kvmppc_core_get_sregs_e500,
- .set_sregs = kvmppc_core_set_sregs_e500,
- .get_one_reg = kvmppc_get_one_reg_e500,
- .set_one_reg = kvmppc_set_one_reg_e500,
- .vcpu_load = kvmppc_core_vcpu_load_e500,
- .vcpu_put = kvmppc_core_vcpu_put_e500,
- .vcpu_create = kvmppc_core_vcpu_create_e500,
- .vcpu_free = kvmppc_core_vcpu_free_e500,
- .mmu_destroy = kvmppc_mmu_destroy_e500,
- .init_vm = kvmppc_core_init_vm_e500,
- .destroy_vm = kvmppc_core_destroy_vm_e500,
- .emulate_op = kvmppc_core_emulate_op_e500,
- .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
- .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
-};
-
static int __init kvmppc_e500_init(void)
{
int r, i;
@@ -539,11 +519,11 @@ static int __init kvmppc_e500_init(void)
r = kvmppc_core_check_processor_compat();
if (r)
- goto err_out;
+ return r;
r = kvmppc_booke_init();
if (r)
- goto err_out;
+ return r;
/* copy extra E500 exception handlers */
ivor[0] = mfspr(SPRN_IVOR32);
@@ -561,19 +541,11 @@ static int __init kvmppc_e500_init(void)
flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
ivor[max_ivor] + handler_len);
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
- if (r)
- goto err_out;
- kvm_ops_e500.owner = THIS_MODULE;
- kvmppc_pr_ops = &kvm_ops_e500;
-
-err_out:
- return r;
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}
static void __exit kvmppc_e500_exit(void)
{
- kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 42763b6..fc04187 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -104,8 +104,8 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance)
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int ra = get_ra(inst);
@@ -181,7 +181,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
@@ -277,7 +277,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
return emulated;
}
-int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index ebca6b8..6d6f153 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -32,7 +32,7 @@
#include <asm/kvm_ppc.h>
#include "e500.h"
-#include "trace_booke.h"
+#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"
@@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
-void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 358d0c3..e9386c7 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -32,11 +32,10 @@
#include <asm/kvm_ppc.h>
#include "e500.h"
+#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"
-#include "trace_booke.h"
-
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 17840a7..3597b6f 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -111,7 +111,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
-static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int lpid_idx = 0;
@@ -161,7 +161,7 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
kvmppc_load_guest_altivec(vcpu);
}
-static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
vcpu->arch.eplc = mfspr(SPRN_EPLC);
vcpu->arch.epsc = mfspr(SPRN_EPSC);
@@ -218,8 +218,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -239,11 +238,10 @@ static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
- return kvmppc_get_sregs_ivor(vcpu, sregs);
+ kvmppc_get_sregs_ivor(vcpu, sregs);
}
-static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int ret;
@@ -276,15 +274,15 @@ static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
return kvmppc_set_sregs_ivor(vcpu, sregs);
}
-static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}
-static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
- union kvmppc_one_reg *val)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
return r;
@@ -356,8 +354,8 @@ void kvmppc_prepare_for_emulation(struct kvm_vcpu *vcpu, unsigned int *exit_nr)
kunmap_atomic((u32 *)eaddr);
}
-static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
- unsigned int id)
+struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
+ unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;
@@ -398,7 +396,7 @@ out:
return ERR_PTR(err);
}
-static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -408,7 +406,7 @@ static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
-static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
+int kvmppc_core_init_vm(struct kvm *kvm)
{
int i, lpid;
@@ -427,7 +425,7 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
return 0;
}
-static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
+void kvmppc_core_destroy_vm(struct kvm *kvm)
{
int i;
@@ -436,47 +434,22 @@ static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
}
}
-static struct kvmppc_ops kvm_ops_e500mc = {
- .get_sregs = kvmppc_core_get_sregs_e500mc,
- .set_sregs = kvmppc_core_set_sregs_e500mc,
- .get_one_reg = kvmppc_get_one_reg_e500mc,
- .set_one_reg = kvmppc_set_one_reg_e500mc,
- .vcpu_load = kvmppc_core_vcpu_load_e500mc,
- .vcpu_put = kvmppc_core_vcpu_put_e500mc,
- .vcpu_create = kvmppc_core_vcpu_create_e500mc,
- .vcpu_free = kvmppc_core_vcpu_free_e500mc,
- .mmu_destroy = kvmppc_mmu_destroy_e500,
- .init_vm = kvmppc_core_init_vm_e500mc,
- .destroy_vm = kvmppc_core_destroy_vm_e500mc,
- .emulate_op = kvmppc_core_emulate_op_e500,
- .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
- .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
-};
-
static int __init kvmppc_e500mc_init(void)
{
int r;
r = kvmppc_booke_init();
if (r)
- goto err_out;
+ return r;
kvmppc_init_lpid(64);
kvmppc_claim_lpid(0); /* host */
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
- if (r)
- goto err_out;
- kvm_ops_e500mc.owner = THIS_MODULE;
- kvmppc_pr_ops = &kvm_ops_e500mc;
-
-err_out:
- return r;
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}
static void __exit kvmppc_e500mc_exit(void)
{
- kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2f9a087..751cd45 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_PIR: break;
default:
- emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
- spr_val);
+ emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+ spr_val);
if (emulated == EMULATE_FAIL)
printk(KERN_INFO "mtspr: unknown spr "
"0x%x\n", sprn);
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
default:
- emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
- &spr_val);
+ emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+ &spr_val);
if (unlikely(emulated == EMULATE_FAIL)) {
printk(KERN_INFO "mfspr: unknown spr "
"0x%x\n", sprn);
@@ -464,8 +464,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
if (emulated == EMULATE_FAIL) {
- emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
- &advance);
+ emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
} else if (emulated == EMULATE_FAIL) {
@@ -484,4 +483,3 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
return emulated;
}
-EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a1fe2a9..832e043 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
-#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
@@ -40,12 +39,6 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-struct kvmppc_ops *kvmppc_hv_ops;
-EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
-struct kvmppc_ops *kvmppc_pr_ops;
-EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
-
-
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !!(v->arch.pending_exceptions) ||
@@ -57,6 +50,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
+#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
@@ -128,7 +122,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
local_irq_enable();
return r;
}
-EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
@@ -182,7 +176,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
return r;
}
-EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
@@ -196,9 +189,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
goto out;
+#ifdef CONFIG_KVM_BOOK3S_64_HV
/* HV KVM can only do PAPR mode for now */
- if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
+ if (!vcpu->arch.papr_enabled)
goto out;
+#endif
#ifdef CONFIG_KVM_BOOKE_HV
if (!cpu_has_feature(CPU_FTR_EMB_HV))
@@ -211,7 +206,6 @@ out:
vcpu->arch.sane = r;
return r ? 0 : -EINVAL;
}
-EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
@@ -246,7 +240,6 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvm_arch_hardware_enable(void *garbage)
{
@@ -273,35 +266,10 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- struct kvmppc_ops *kvm_ops = NULL;
- /*
- * if we have both HV and PR enabled, default is HV
- */
- if (type == 0) {
- if (kvmppc_hv_ops)
- kvm_ops = kvmppc_hv_ops;
- else
- kvm_ops = kvmppc_pr_ops;
- if (!kvm_ops)
- goto err_out;
- } else if (type == KVM_VM_PPC_HV) {
- if (!kvmppc_hv_ops)
- goto err_out;
- kvm_ops = kvmppc_hv_ops;
- } else if (type == KVM_VM_PPC_PR) {
- if (!kvmppc_pr_ops)
- goto err_out;
- kvm_ops = kvmppc_pr_ops;
- } else
- goto err_out;
-
- if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
- return -ENOENT;
-
- kvm->arch.kvm_ops = kvm_ops;
+ if (type)
+ return -EINVAL;
+
return kvmppc_core_init_vm(kvm);
-err_out:
- return -EINVAL;
}
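
The removed lines above selected a backend from the VM type argument. A standalone sketch of that dispatch; the KVM_VM_PPC_* values match the uapi constants:

#include <stddef.h>

#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2

struct ops;
static struct ops *hv_ops;   /* non-NULL when the HV backend is loaded */
static struct ops *pr_ops;   /* non-NULL when the PR backend is loaded */

static struct ops *select_ops(unsigned long type)
{
        if (type == 0)
                return hv_ops ? hv_ops : pr_ops;  /* default prefers HV */
        if (type == KVM_VM_PPC_HV)
                return hv_ops;
        if (type == KVM_VM_PPC_PR)
                return pr_ops;
        return NULL;   /* unknown type -> -EINVAL in the caller */
}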
void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -321,9 +289,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvmppc_core_destroy_vm(kvm);
mutex_unlock(&kvm->lock);
-
- /* drop the module reference */
- module_put(kvm->arch.kvm_ops->owner);
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -333,10 +298,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
- /* FIXME!!
- * Should some of this be vm ioctl ? is it possible now ?
- */
- int hv_enabled = kvmppc_hv_ops ? 1 : 0;
switch (ext) {
#ifdef CONFIG_BOOKE
@@ -356,26 +317,22 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_DEVICE_CTRL:
r = 1;
break;
+#ifndef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
- /* We support this only for PR */
- r = !hv_enabled;
- break;
-#ifdef CONFIG_KVM_MMIO
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
-#endif
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC:
+#endif
r = 1;
break;
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+ break;
#endif
-
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:
@@ -386,37 +343,32 @@ int kvm_dev_ioctl_check_extension(long ext)
r = 1;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_SMT:
- if (hv_enabled)
- r = threads_per_core;
- else
- r = 0;
+ r = threads_per_core;
break;
case KVM_CAP_PPC_RMA:
- r = hv_enabled;
+ r = 1;
/* PPC970 requires an RMA */
- if (r && cpu_has_feature(CPU_FTR_ARCH_201))
+ if (cpu_has_feature(CPU_FTR_ARCH_201))
r = 2;
break;
#endif
case KVM_CAP_SYNC_MMU:
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (hv_enabled)
- r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
- else
- r = 0;
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
r = 0;
-#endif
break;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_HTAB_FD:
- r = hv_enabled;
+ r = 1;
break;
#endif
+ break;
case KVM_CAP_NR_VCPUS:
/*
* Recommending a number of CPUs is somewhat arbitrary; we
@@ -424,10 +376,11 @@ int kvm_dev_ioctl_check_extension(long ext)
* will have secondary threads "offline"), and for other KVM
* implementations just count online CPUs.
*/
- if (hv_enabled)
- r = num_present_cpus();
- else
- r = num_online_cpus();
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ r = num_present_cpus();
+#else
+ r = num_online_cpus();
+#endif
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -451,16 +404,15 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
- kvmppc_core_free_memslot(kvm, free, dont);
+ kvmppc_core_free_memslot(free, dont);
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
- return kvmppc_core_create_memslot(kvm, slot, npages);
+ return kvmppc_core_create_memslot(slot, npages);
}
void kvm_arch_memslots_updated(struct kvm *kvm)
@@ -704,7 +656,6 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
-EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -766,7 +717,6 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
-EXPORT_SYMBOL_GPL(kvmppc_handle_store);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -1071,12 +1021,52 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
goto out;
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_ALLOCATE_RMA: {
+ struct kvm_allocate_rma rma;
+ struct kvm *kvm = filp->private_data;
+
+ r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+ if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+ r = -EFAULT;
+ break;
+ }
+
+ case KVM_PPC_ALLOCATE_HTAB: {
+ u32 htab_order;
+
+ r = -EFAULT;
+ if (get_user(htab_order, (u32 __user *)argp))
+ break;
+ r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+ if (r)
+ break;
+ r = -EFAULT;
+ if (put_user(htab_order, (u32 __user *)argp))
+ break;
+ r = 0;
+ break;
+ }
+
+ case KVM_PPC_GET_HTAB_FD: {
+ struct kvm_get_htab_fd ghf;
+
+ r = -EFAULT;
+ if (copy_from_user(&ghf, argp, sizeof(ghf)))
+ break;
+ r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+ break;
+ }
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
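
Userspace drives the restored KVM_PPC_ALLOCATE_HTAB ioctl with a u32 that is both input (suggested HPT order) and output (the order actually used). A hedged sketch; vm_fd is assumed to be an open KVM VM descriptor:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int alloc_htab(int vm_fd, uint32_t *order)
{
        /* The ioctl both consumes and updates *order. */
        if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, order) < 0) {
                perror("KVM_PPC_ALLOCATE_HTAB");
                return -1;
        }
        printf("hashed page table order: %u\n", *order);
        return 0;
}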
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
struct kvm_ppc_smmu_info info;
- struct kvm *kvm = filp->private_data;
memset(&info, 0, sizeof(info));
- r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
+ r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
r = -EFAULT;
break;
@@ -1087,15 +1077,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
break;
}
- default: {
- struct kvm *kvm = filp->private_data;
- r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
- }
-#else /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
-#endif
}
+
out:
return r;
}
@@ -1117,26 +1103,22 @@ long kvmppc_alloc_lpid(void)
return lpid;
}
-EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
void kvmppc_claim_lpid(long lpid)
{
set_bit(lpid, lpid_inuse);
}
-EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
void kvmppc_free_lpid(long lpid)
{
clear_bit(lpid, lpid_inuse);
}
-EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
-EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
int kvm_arch_init(void *opaque)
{
@@ -1145,5 +1127,4 @@ int kvm_arch_init(void *opaque)
void kvm_arch_exit(void)
{
-
}
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 2e0e67e..e326489 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,6 +31,126 @@ TRACE_EVENT(kvm_ppc_instr,
__entry->inst, __entry->pc, __entry->emulate)
);
+#ifdef CONFIG_PPC_BOOK3S
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+#else
+#define kvm_trace_symbol_exit \
+ {0, "CRITICAL"}, \
+ {1, "MACHINE_CHECK"}, \
+ {2, "DATA_STORAGE"}, \
+ {3, "INST_STORAGE"}, \
+ {4, "EXTERNAL"}, \
+ {5, "ALIGNMENT"}, \
+ {6, "PROGRAM"}, \
+ {7, "FP_UNAVAIL"}, \
+ {8, "SYSCALL"}, \
+ {9, "AP_UNAVAIL"}, \
+ {10, "DECREMENTER"}, \
+ {11, "FIT"}, \
+ {12, "WATCHDOG"}, \
+ {13, "DTLB_MISS"}, \
+ {14, "ITLB_MISS"}, \
+ {15, "DEBUG"}, \
+ {32, "SPE_UNAVAIL"}, \
+ {33, "SPE_FP_DATA"}, \
+ {34, "SPE_FP_ROUND"}, \
+ {35, "PERFORMANCE_MONITOR"}, \
+ {36, "DOORBELL"}, \
+ {37, "DOORBELL_CRITICAL"}, \
+ {38, "GUEST_DBELL"}, \
+ {39, "GUEST_DBELL_CRIT"}, \
+ {40, "HV_SYSCALL"}, \
+ {41, "HV_PRIV"}
+#endif
+
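
At trace decode time, __print_symbolic() resolves exit_nr against the table above by linear scan. A standalone equivalent showing a few of the Book3S entries:

#include <stddef.h>
#include <stdio.h>

struct sym { unsigned int val; const char *name; };

static const struct sym exit_syms[] = {
        { 0x300, "DATA_STORAGE" },
        { 0x400, "INST_STORAGE" },
        { 0x700, "PROGRAM" },
        { 0xc00, "SYSCALL" },
};

static const char *exit_name(unsigned int exit_nr)
{
        for (size_t i = 0; i < sizeof(exit_syms) / sizeof(exit_syms[0]); i++)
                if (exit_syms[i].val == exit_nr)
                        return exit_syms[i].name;
        return "UNKNOWN";
}

int main(void)
{
        printf("exit=%s\n", exit_name(0x700));   /* -> PROGRAM */
        return 0;
}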
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+ TP_ARGS(exit_nr, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, exit_nr )
+ __field( unsigned long, pc )
+ __field( unsigned long, msr )
+ __field( unsigned long, dar )
+#ifdef CONFIG_KVM_BOOK3S_PR
+ __field( unsigned long, srr1 )
+#endif
+ __field( unsigned long, last_inst )
+ ),
+
+ TP_fast_assign(
+#ifdef CONFIG_KVM_BOOK3S_PR
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
+#endif
+ __entry->exit_nr = exit_nr;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->dar = kvmppc_get_fault_dar(vcpu);
+ __entry->msr = vcpu->arch.shared->msr;
+#ifdef CONFIG_KVM_BOOK3S_PR
+ svcpu = svcpu_get(vcpu);
+ __entry->srr1 = svcpu->shadow_srr1;
+ svcpu_put(svcpu);
+#endif
+ __entry->last_inst = vcpu->arch.last_inst;
+ ),
+
+ TP_printk("exit=%s"
+ " | pc=0x%lx"
+ " | msr=0x%lx"
+ " | dar=0x%lx"
+#ifdef CONFIG_KVM_BOOK3S_PR
+ " | srr1=0x%lx"
+#endif
+ " | last_inst=0x%lx"
+ ,
+ __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+ __entry->pc,
+ __entry->msr,
+ __entry->dar,
+#ifdef CONFIG_KVM_BOOK3S_PR
+ __entry->srr1,
+#endif
+ __entry->last_inst
+ )
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
TRACE_EVENT(kvm_stlb_inval,
TP_PROTO(unsigned int stlb_index),
TP_ARGS(stlb_index),
@@ -116,6 +236,315 @@ TRACE_EVENT(kvm_check_requests,
__entry->cpu_nr, __entry->requests)
);
+
+/*************************************************************************
+ * Book3S trace points *
+ *************************************************************************/
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
+TRACE_EVENT(kvm_book3s_reenter,
+ TP_PROTO(int r, struct kvm_vcpu *vcpu),
+ TP_ARGS(r, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, r )
+ __field( unsigned long, pc )
+ ),
+
+ TP_fast_assign(
+ __entry->r = r;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ ),
+
+ TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
+);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+TRACE_EVENT(kvm_book3s_64_mmu_map,
+ TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
+ struct kvmppc_pte *orig_pte),
+ TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
+
+ TP_STRUCT__entry(
+ __field( unsigned char, flag_w )
+ __field( unsigned char, flag_x )
+ __field( unsigned long, eaddr )
+ __field( unsigned long, hpteg )
+ __field( unsigned long, va )
+ __field( unsigned long long, vpage )
+ __field( unsigned long, hpaddr )
+ ),
+
+ TP_fast_assign(
+ __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
+ __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
+ __entry->eaddr = orig_pte->eaddr;
+ __entry->hpteg = hpteg;
+ __entry->va = va;
+ __entry->vpage = orig_pte->vpage;
+ __entry->hpaddr = hpaddr;
+ ),
+
+ TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
+ __entry->flag_w, __entry->flag_x, __entry->eaddr,
+ __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
+);
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+TRACE_EVENT(kvm_book3s_mmu_map,
+ TP_PROTO(struct hpte_cache *pte),
+ TP_ARGS(pte),
+
+ TP_STRUCT__entry(
+ __field( u64, host_vpn )
+ __field( u64, pfn )
+ __field( ulong, eaddr )
+ __field( u64, vpage )
+ __field( ulong, raddr )
+ __field( int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->host_vpn = pte->host_vpn;
+ __entry->pfn = pte->pfn;
+ __entry->eaddr = pte->pte.eaddr;
+ __entry->vpage = pte->pte.vpage;
+ __entry->raddr = pte->pte.raddr;
+ __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
+ (pte->pte.may_write ? 0x2 : 0) |
+ (pte->pte.may_execute ? 0x1 : 0);
+ ),
+
+ TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+ __entry->host_vpn, __entry->pfn, __entry->eaddr,
+ __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_invalidate,
+ TP_PROTO(struct hpte_cache *pte),
+ TP_ARGS(pte),
+
+ TP_STRUCT__entry(
+ __field( u64, host_vpn )
+ __field( u64, pfn )
+ __field( ulong, eaddr )
+ __field( u64, vpage )
+ __field( ulong, raddr )
+ __field( int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->host_vpn = pte->host_vpn;
+ __entry->pfn = pte->pfn;
+ __entry->eaddr = pte->pte.eaddr;
+ __entry->vpage = pte->pte.vpage;
+ __entry->raddr = pte->pte.raddr;
+ __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
+ (pte->pte.may_write ? 0x2 : 0) |
+ (pte->pte.may_execute ? 0x1 : 0);
+ ),
+
+ TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+ __entry->host_vpn, __entry->pfn, __entry->eaddr,
+ __entry->vpage, __entry->raddr, __entry->flags)
+);
+
+TRACE_EVENT(kvm_book3s_mmu_flush,
+ TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
+ unsigned long long p2),
+ TP_ARGS(type, vcpu, p1, p2),
+
+ TP_STRUCT__entry(
+ __field( int, count )
+ __field( unsigned long long, p1 )
+ __field( unsigned long long, p2 )
+ __field( const char *, type )
+ ),
+
+ TP_fast_assign(
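+ /* type is stored by pointer, so callers are expected to pass string literals. */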
+ __entry->count = to_book3s(vcpu)->hpte_cache_count;
+ __entry->p1 = p1;
+ __entry->p2 = p2;
+ __entry->type = type;
+ ),
+
+ TP_printk("Flush %d %sPTEs: %llx - %llx",
+ __entry->count, __entry->type, __entry->p1, __entry->p2)
+);
+
+TRACE_EVENT(kvm_book3s_slb_found,
+ TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
+ TP_ARGS(gvsid, hvsid),
+
+ TP_STRUCT__entry(
+ __field( unsigned long long, gvsid )
+ __field( unsigned long long, hvsid )
+ ),
+
+ TP_fast_assign(
+ __entry->gvsid = gvsid;
+ __entry->hvsid = hvsid;
+ ),
+
+ TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_fail,
+ TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
+ TP_ARGS(sid_map_mask, gvsid),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, sid_map_mask )
+ __field( unsigned long long, gvsid )
+ ),
+
+ TP_fast_assign(
+ __entry->sid_map_mask = sid_map_mask;
+ __entry->gvsid = gvsid;
+ ),
+
+ TP_printk("%x/%x: %llx", __entry->sid_map_mask,
+ SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
+);
+
+TRACE_EVENT(kvm_book3s_slb_map,
+ TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
+ unsigned long long hvsid),
+ TP_ARGS(sid_map_mask, gvsid, hvsid),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, sid_map_mask )
+ __field( unsigned long long, guest_vsid )
+ __field( unsigned long long, host_vsid )
+ ),
+
+ TP_fast_assign(
+ __entry->sid_map_mask = sid_map_mask;
+ __entry->guest_vsid = gvsid;
+ __entry->host_vsid = hvsid;
+ ),
+
+ TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
+ __entry->guest_vsid, __entry->host_vsid)
+);
+
+TRACE_EVENT(kvm_book3s_slbmte,
+ TP_PROTO(u64 slb_vsid, u64 slb_esid),
+ TP_ARGS(slb_vsid, slb_esid),
+
+ TP_STRUCT__entry(
+ __field( u64, slb_vsid )
+ __field( u64, slb_esid )
+ ),
+
+ TP_fast_assign(
+ __entry->slb_vsid = slb_vsid;
+ __entry->slb_esid = slb_esid;
+ ),
+
+ TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
+);
+
+#endif /* CONFIG_KVM_BOOK3S_PR */
+
+
+/*************************************************************************
+ * Book3E trace points *
+ *************************************************************************/
+
+#ifdef CONFIG_BOOKE
+
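+/*
+ * The MAS0-MAS8 values are the e500 MMU-assist registers that together
+ * describe the TLB entry being written.
+ */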
+TRACE_EVENT(kvm_booke206_stlb_write,
+ TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+ TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+ TP_STRUCT__entry(
+ __field( __u32, mas0 )
+ __field( __u32, mas8 )
+ __field( __u32, mas1 )
+ __field( __u64, mas2 )
+ __field( __u64, mas7_3 )
+ ),
+
+ TP_fast_assign(
+ __entry->mas0 = mas0;
+ __entry->mas8 = mas8;
+ __entry->mas1 = mas1;
+ __entry->mas2 = mas2;
+ __entry->mas7_3 = mas7_3;
+ ),
+
+ TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+ __entry->mas0, __entry->mas8, __entry->mas1,
+ __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+ TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+ TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+ TP_STRUCT__entry(
+ __field( __u32, mas0 )
+ __field( __u32, mas1 )
+ __field( __u64, mas2 )
+ __field( __u64, mas7_3 )
+ ),
+
+ TP_fast_assign(
+ __entry->mas0 = mas0;
+ __entry->mas1 = mas1;
+ __entry->mas2 = mas2;
+ __entry->mas7_3 = mas7_3;
+ ),
+
+ TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+ __entry->mas0, __entry->mas1,
+ __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_ref_release,
+ TP_PROTO(__u64 pfn, __u32 flags),
+ TP_ARGS(pfn, flags),
+
+ TP_STRUCT__entry(
+ __field( __u64, pfn )
+ __field( __u32, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("pfn=%llx flags=%x",
+ __entry->pfn, __entry->flags)
+);
+
+TRACE_EVENT(kvm_booke_queue_irqprio,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
+ TP_ARGS(vcpu, priority),
+
+ TP_STRUCT__entry(
+ __field( __u32, cpu_nr )
+ __field( __u32, priority )
+ __field( unsigned long, pending )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_nr = vcpu->vcpu_id;
+ __entry->priority = priority;
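+ /* pending_exceptions is the bitmap of interrupt priorities still queued. */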
+ __entry->pending = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("vcpu=%x prio=%x pending=%lx",
+ __entry->cpu_nr, __entry->priority, __entry->pending)
+);
+
+#endif /* CONFIG_BOOKE */
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
deleted file mode 100644
index f7537cf..0000000
--- a/arch/powerpc/kvm/trace_booke.h
+++ /dev/null
@@ -1,177 +0,0 @@
-#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_BOOKE_H
-
-#include <linux/tracepoint.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
-
-#define kvm_trace_symbol_exit \
- {0, "CRITICAL"}, \
- {1, "MACHINE_CHECK"}, \
- {2, "DATA_STORAGE"}, \
- {3, "INST_STORAGE"}, \
- {4, "EXTERNAL"}, \
- {5, "ALIGNMENT"}, \
- {6, "PROGRAM"}, \
- {7, "FP_UNAVAIL"}, \
- {8, "SYSCALL"}, \
- {9, "AP_UNAVAIL"}, \
- {10, "DECREMENTER"}, \
- {11, "FIT"}, \
- {12, "WATCHDOG"}, \
- {13, "DTLB_MISS"}, \
- {14, "ITLB_MISS"}, \
- {15, "DEBUG"}, \
- {32, "SPE_UNAVAIL"}, \
- {33, "SPE_FP_DATA"}, \
- {34, "SPE_FP_ROUND"}, \
- {35, "PERFORMANCE_MONITOR"}, \
- {36, "DOORBELL"}, \
- {37, "DOORBELL_CRITICAL"}, \
- {38, "GUEST_DBELL"}, \
- {39, "GUEST_DBELL_CRIT"}, \
- {40, "HV_SYSCALL"}, \
- {41, "HV_PRIV"}
-
-TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
- TP_ARGS(exit_nr, vcpu),
-
- TP_STRUCT__entry(
- __field( unsigned int, exit_nr )
- __field( unsigned long, pc )
- __field( unsigned long, msr )
- __field( unsigned long, dar )
- __field( unsigned long, last_inst )
- ),
-
- TP_fast_assign(
- __entry->exit_nr = exit_nr;
- __entry->pc = kvmppc_get_pc(vcpu);
- __entry->dar = kvmppc_get_fault_dar(vcpu);
- __entry->msr = vcpu->arch.shared->msr;
- __entry->last_inst = vcpu->arch.last_inst;
- ),
-
- TP_printk("exit=%s"
- " | pc=0x%lx"
- " | msr=0x%lx"
- " | dar=0x%lx"
- " | last_inst=0x%lx"
- ,
- __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
- __entry->pc,
- __entry->msr,
- __entry->dar,
- __entry->last_inst
- )
-);
-
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
-TRACE_EVENT(kvm_booke206_stlb_write,
- TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
- TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
-
- TP_STRUCT__entry(
- __field( __u32, mas0 )
- __field( __u32, mas8 )
- __field( __u32, mas1 )
- __field( __u64, mas2 )
- __field( __u64, mas7_3 )
- ),
-
- TP_fast_assign(
- __entry->mas0 = mas0;
- __entry->mas8 = mas8;
- __entry->mas1 = mas1;
- __entry->mas2 = mas2;
- __entry->mas7_3 = mas7_3;
- ),
-
- TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
- __entry->mas0, __entry->mas8, __entry->mas1,
- __entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_gtlb_write,
- TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
- TP_ARGS(mas0, mas1, mas2, mas7_3),
-
- TP_STRUCT__entry(
- __field( __u32, mas0 )
- __field( __u32, mas1 )
- __field( __u64, mas2 )
- __field( __u64, mas7_3 )
- ),
-
- TP_fast_assign(
- __entry->mas0 = mas0;
- __entry->mas1 = mas1;
- __entry->mas2 = mas2;
- __entry->mas7_3 = mas7_3;
- ),
-
- TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
- __entry->mas0, __entry->mas1,
- __entry->mas2, __entry->mas7_3)
-);
-
-TRACE_EVENT(kvm_booke206_ref_release,
- TP_PROTO(__u64 pfn, __u32 flags),
- TP_ARGS(pfn, flags),
-
- TP_STRUCT__entry(
- __field( __u64, pfn )
- __field( __u32, flags )
- ),
-
- TP_fast_assign(
- __entry->pfn = pfn;
- __entry->flags = flags;
- ),
-
- TP_printk("pfn=%llx flags=%x",
- __entry->pfn, __entry->flags)
-);
-
-TRACE_EVENT(kvm_booke_queue_irqprio,
- TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
- TP_ARGS(vcpu, priority),
-
- TP_STRUCT__entry(
- __field( __u32, cpu_nr )
- __field( __u32, priority )
- __field( unsigned long, pending )
- ),
-
- TP_fast_assign(
- __entry->cpu_nr = vcpu->vcpu_id;
- __entry->priority = priority;
- __entry->pending = vcpu->arch.pending_exceptions;
- ),
-
- TP_printk("vcpu=%x prio=%x pending=%lx",
- __entry->cpu_nr, __entry->priority, __entry->pending)
-);
-
-#endif
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
deleted file mode 100644
index 8b22e47..0000000
--- a/arch/powerpc/kvm/trace_pr.h
+++ /dev/null
@@ -1,297 +0,0 @@
-
-#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_PR_H
-
-#include <linux/tracepoint.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
-
-#define kvm_trace_symbol_exit \
- {0x100, "SYSTEM_RESET"}, \
- {0x200, "MACHINE_CHECK"}, \
- {0x300, "DATA_STORAGE"}, \
- {0x380, "DATA_SEGMENT"}, \
- {0x400, "INST_STORAGE"}, \
- {0x480, "INST_SEGMENT"}, \
- {0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
- {0x502, "EXTERNAL_HV"}, \
- {0x600, "ALIGNMENT"}, \
- {0x700, "PROGRAM"}, \
- {0x800, "FP_UNAVAIL"}, \
- {0x900, "DECREMENTER"}, \
- {0x980, "HV_DECREMENTER"}, \
- {0xc00, "SYSCALL"}, \
- {0xd00, "TRACE"}, \
- {0xe00, "H_DATA_STORAGE"}, \
- {0xe20, "H_INST_STORAGE"}, \
- {0xe40, "H_EMUL_ASSIST"}, \
- {0xf00, "PERFMON"}, \
- {0xf20, "ALTIVEC"}, \
- {0xf40, "VSX"}
-
-TRACE_EVENT(kvm_book3s_reenter,
- TP_PROTO(int r, struct kvm_vcpu *vcpu),
- TP_ARGS(r, vcpu),
-
- TP_STRUCT__entry(
- __field( unsigned int, r )
- __field( unsigned long, pc )
- ),
-
- TP_fast_assign(
- __entry->r = r;
- __entry->pc = kvmppc_get_pc(vcpu);
- ),
-
- TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
-);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-
-TRACE_EVENT(kvm_book3s_64_mmu_map,
- TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
- struct kvmppc_pte *orig_pte),
- TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
-
- TP_STRUCT__entry(
- __field( unsigned char, flag_w )
- __field( unsigned char, flag_x )
- __field( unsigned long, eaddr )
- __field( unsigned long, hpteg )
- __field( unsigned long, va )
- __field( unsigned long long, vpage )
- __field( unsigned long, hpaddr )
- ),
-
- TP_fast_assign(
- __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
- __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
- __entry->eaddr = orig_pte->eaddr;
- __entry->hpteg = hpteg;
- __entry->va = va;
- __entry->vpage = orig_pte->vpage;
- __entry->hpaddr = hpaddr;
- ),
-
- TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
- __entry->flag_w, __entry->flag_x, __entry->eaddr,
- __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
-);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-TRACE_EVENT(kvm_book3s_mmu_map,
- TP_PROTO(struct hpte_cache *pte),
- TP_ARGS(pte),
-
- TP_STRUCT__entry(
- __field( u64, host_vpn )
- __field( u64, pfn )
- __field( ulong, eaddr )
- __field( u64, vpage )
- __field( ulong, raddr )
- __field( int, flags )
- ),
-
- TP_fast_assign(
- __entry->host_vpn = pte->host_vpn;
- __entry->pfn = pte->pfn;
- __entry->eaddr = pte->pte.eaddr;
- __entry->vpage = pte->pte.vpage;
- __entry->raddr = pte->pte.raddr;
- __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
- (pte->pte.may_write ? 0x2 : 0) |
- (pte->pte.may_execute ? 0x1 : 0);
- ),
-
- TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
- __entry->host_vpn, __entry->pfn, __entry->eaddr,
- __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_invalidate,
- TP_PROTO(struct hpte_cache *pte),
- TP_ARGS(pte),
-
- TP_STRUCT__entry(
- __field( u64, host_vpn )
- __field( u64, pfn )
- __field( ulong, eaddr )
- __field( u64, vpage )
- __field( ulong, raddr )
- __field( int, flags )
- ),
-
- TP_fast_assign(
- __entry->host_vpn = pte->host_vpn;
- __entry->pfn = pte->pfn;
- __entry->eaddr = pte->pte.eaddr;
- __entry->vpage = pte->pte.vpage;
- __entry->raddr = pte->pte.raddr;
- __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
- (pte->pte.may_write ? 0x2 : 0) |
- (pte->pte.may_execute ? 0x1 : 0);
- ),
-
- TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
- __entry->host_vpn, __entry->pfn, __entry->eaddr,
- __entry->vpage, __entry->raddr, __entry->flags)
-);
-
-TRACE_EVENT(kvm_book3s_mmu_flush,
- TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
- unsigned long long p2),
- TP_ARGS(type, vcpu, p1, p2),
-
- TP_STRUCT__entry(
- __field( int, count )
- __field( unsigned long long, p1 )
- __field( unsigned long long, p2 )
- __field( const char *, type )
- ),
-
- TP_fast_assign(
- __entry->count = to_book3s(vcpu)->hpte_cache_count;
- __entry->p1 = p1;
- __entry->p2 = p2;
- __entry->type = type;
- ),
-
- TP_printk("Flush %d %sPTEs: %llx - %llx",
- __entry->count, __entry->type, __entry->p1, __entry->p2)
-);
-
-TRACE_EVENT(kvm_book3s_slb_found,
- TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
- TP_ARGS(gvsid, hvsid),
-
- TP_STRUCT__entry(
- __field( unsigned long long, gvsid )
- __field( unsigned long long, hvsid )
- ),
-
- TP_fast_assign(
- __entry->gvsid = gvsid;
- __entry->hvsid = hvsid;
- ),
-
- TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_fail,
- TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
- TP_ARGS(sid_map_mask, gvsid),
-
- TP_STRUCT__entry(
- __field( unsigned short, sid_map_mask )
- __field( unsigned long long, gvsid )
- ),
-
- TP_fast_assign(
- __entry->sid_map_mask = sid_map_mask;
- __entry->gvsid = gvsid;
- ),
-
- TP_printk("%x/%x: %llx", __entry->sid_map_mask,
- SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
-);
-
-TRACE_EVENT(kvm_book3s_slb_map,
- TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
- unsigned long long hvsid),
- TP_ARGS(sid_map_mask, gvsid, hvsid),
-
- TP_STRUCT__entry(
- __field( unsigned short, sid_map_mask )
- __field( unsigned long long, guest_vsid )
- __field( unsigned long long, host_vsid )
- ),
-
- TP_fast_assign(
- __entry->sid_map_mask = sid_map_mask;
- __entry->guest_vsid = gvsid;
- __entry->host_vsid = hvsid;
- ),
-
- TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
- __entry->guest_vsid, __entry->host_vsid)
-);
-
-TRACE_EVENT(kvm_book3s_slbmte,
- TP_PROTO(u64 slb_vsid, u64 slb_esid),
- TP_ARGS(slb_vsid, slb_esid),
-
- TP_STRUCT__entry(
- __field( u64, slb_vsid )
- __field( u64, slb_esid )
- ),
-
- TP_fast_assign(
- __entry->slb_vsid = slb_vsid;
- __entry->slb_esid = slb_esid;
- ),
-
- TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
-);
-
-TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
- TP_ARGS(exit_nr, vcpu),
-
- TP_STRUCT__entry(
- __field( unsigned int, exit_nr )
- __field( unsigned long, pc )
- __field( unsigned long, msr )
- __field( unsigned long, dar )
- __field( unsigned long, srr1 )
- __field( unsigned long, last_inst )
- ),
-
- TP_fast_assign(
- __entry->exit_nr = exit_nr;
- __entry->pc = kvmppc_get_pc(vcpu);
- __entry->dar = kvmppc_get_fault_dar(vcpu);
- __entry->msr = vcpu->arch.shared->msr;
- __entry->srr1 = vcpu->arch.shadow_srr1;
- __entry->last_inst = vcpu->arch.last_inst;
- ),
-
- TP_printk("exit=%s"
- " | pc=0x%lx"
- " | msr=0x%lx"
- " | dar=0x%lx"
- " | srr1=0x%lx"
- " | last_inst=0x%lx"
- ,
- __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
- __entry->pc,
- __entry->msr,
- __entry->dar,
- __entry->srr1,
- __entry->last_inst
- )
-);
-
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
-#endif /* _TRACE_KVM_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>