author    Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 21:22:12 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 21:22:12 (GMT)
commit    1baa5efbeb6eb75de697f7b5931094be33f12005 (patch)
tree      30358fcebb950305929306c6654f8a585f1f5500 /arch/arm
parent    c9bed1cf51011c815d88288b774865d013ca78a8 (diff)
parent    45bdbcfdf241149642fb6c25ab0c209d59c371b7 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "PPC changes will come next week.

  - s390: Support for runtime instrumentation within guests, support of
    248 VCPUs.

  - ARM: rewrite of the arm64 world switch in C, support for 16-bit VM
    identifiers.  Performance counter virtualization missed the boat.

  - x86: Support for more Hyper-V features (synthetic interrupt
    controller), MMU cleanups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (115 commits)
  kvm: x86: Fix vmwrite to SECONDARY_VM_EXEC_CONTROL
  kvm/x86: Hyper-V SynIC timers tracepoints
  kvm/x86: Hyper-V SynIC tracepoints
  kvm/x86: Update SynIC timers on guest entry only
  kvm/x86: Skip SynIC vector check for QEMU side
  kvm/x86: Hyper-V fix SynIC timer disabling condition
  kvm/x86: Reorg stimer_expiration() to better control timer restart
  kvm/x86: Hyper-V unify stimer_start() and stimer_restart()
  kvm/x86: Drop stimer_stop() function
  kvm/x86: Hyper-V timers fix incorrect logical operation
  KVM: move architecture-dependent requests to arch/
  KVM: renumber vcpu->request bits
  KVM: document which architecture uses each request bit
  KVM: Remove unused KVM_REQ_KICK to save a bit in vcpu->requests
  kvm: x86: Check kvm_write_guest return value in kvm_write_wall_clock
  KVM: s390: implement the RI support of guest
  kvm/s390: drop unpaired smp_mb
  kvm: x86: fix comment about {mmu,nested_mmu}.gva_to_gpa
  KVM: x86: MMU: Use clear_page() instead of init_shadow_page_table()
  arm/arm64: KVM: Detect vGIC presence at runtime
  ...
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/kvm_arm.h   | 34
-rw-r--r--  arch/arm/include/asm/kvm_host.h  |  6
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h   |  5
-rw-r--r--  arch/arm/kvm/arm.c               | 40
-rw-r--r--  arch/arm/kvm/emulate.c           | 74
-rw-r--r--  arch/arm/kvm/guest.c             |  6
-rw-r--r--  arch/arm/kvm/handle_exit.c       |  3
-rw-r--r--  arch/arm/kvm/mmio.c              |  3
-rw-r--r--  arch/arm/kvm/mmu.c               |  6
9 files changed, 117 insertions, 60 deletions
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index dc641dd..e22089f 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -19,6 +19,7 @@
#ifndef __ARM_KVM_ARM_H__
#define __ARM_KVM_ARM_H__
+#include <linux/const.h>
#include <linux/types.h>
/* Hyp Configuration Register (HCR) bits */
@@ -132,10 +133,9 @@
* space.
*/
#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL))
+#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
/* Virtualization Translation Control Register (VTCR) bits */
#define VTCR_SH0 (3 << 12)
@@ -162,17 +162,17 @@
#define VTTBR_X (5 - KVM_T0SZ)
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT (48LLU)
-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT _AC(48, ULL)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
/* Hyp Syndrome Register (HSR) bits */
#define HSR_EC_SHIFT (26)
-#define HSR_EC (0x3fU << HSR_EC_SHIFT)
-#define HSR_IL (1U << 25)
+#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
+#define HSR_IL (_AC(1, UL) << 25)
#define HSR_ISS (HSR_IL - 1)
#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (1U << HSR_ISV_SHIFT)
+#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
#define HSR_FSC (0x3f)
@@ -180,9 +180,9 @@
#define HSR_SSE (1 << 21)
#define HSR_WNR (1 << 6)
#define HSR_CV_SHIFT (24)
-#define HSR_CV (1U << HSR_CV_SHIFT)
+#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
#define HSR_COND_SHIFT (20)
-#define HSR_COND (0xfU << HSR_COND_SHIFT)
+#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_ACCESS (0x08)
@@ -210,13 +210,13 @@
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
-#define HSR_WFI_IS_WFE (1U << 0)
+#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-#define HSR_DABT_S1PTW (1U << 7)
-#define HSR_DABT_CM (1U << 8)
-#define HSR_DABT_EA (1U << 9)
+#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
+#define HSR_DABT_CM (_AC(1, UL) << 8)
+#define HSR_DABT_EA (_AC(1, UL) << 9)
#define kvm_arm_exception_type \
{0, "RESET" }, \
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 6692982..f9f2779 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,6 +150,12 @@ struct kvm_vcpu_stat {
u32 halt_successful_poll;
u32 halt_attempted_poll;
u32 halt_wakeup;
+ u32 hvc_exit_stat;
+ u64 wfe_exit_stat;
+ u64 wfi_exit_stat;
+ u64 mmio_exit_user;
+ u64 mmio_exit_kernel;
+ u64 exits;
};
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 405aa18..9203c21 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start) { }
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+ return 8;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
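On 32-bit ARM the VMID field of VTTBR is architecturally fixed at 8 bits,
so this stub simply returns 8; routing the value through a helper lets the
code shared with arm64 size the VMID space at runtime (the arm64
counterpart probes the CPU's ID registers for the 16-bit VMID support the
merge summary mentions). Illustrative use, assuming the definitions above:

    unsigned int kvm_vmid_bits = kvm_get_vmid_bits();  /* 8 here */
    u32 vmid_mask = (1U << kvm_vmid_bits) - 1;         /* 0xff   */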
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index e06fd29..dda1959 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -44,6 +44,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
+#include <asm/sections.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
@@ -58,9 +59,12 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u8 kvm_next_vmid;
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);
+static bool vgic_present;
+
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
@@ -132,7 +136,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.vmid_gen = 0;
/* The maximum number of VCPUs is limited by the host's GIC model */
- kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
+ kvm->arch.max_vcpus = vgic_present ?
+ kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
return ret;
out_free_stage2_pgd:
@@ -172,6 +177,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
+ r = vgic_present;
+ break;
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
@@ -433,11 +440,12 @@ static void update_vttbr(struct kvm *kvm)
kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
+ kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
- vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = pgd_phys | vmid;
spin_unlock(&kvm_vmid_lock);
@@ -603,6 +611,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
+ vcpu->stat.exits++;
/*
* Back from guest
*************************************************************/
@@ -913,6 +922,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2:
+ if (!vgic_present)
+ return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
@@ -927,6 +938,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
+ if (!vgic_present)
+ return -ENXIO;
return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
}
case KVM_ARM_SET_DEVICE_ADDR: {
@@ -1067,6 +1080,12 @@ static int init_hyp_mode(void)
goto out_free_mappings;
}
+ err = create_hyp_mappings(__start_rodata, __end_rodata);
+ if (err) {
+ kvm_err("Cannot map rodata section\n");
+ goto out_free_mappings;
+ }
+
/*
* Map the Hyp stack pages
*/
@@ -1111,8 +1130,17 @@ static int init_hyp_mode(void)
* Init HYP view of VGIC
*/
err = kvm_vgic_hyp_init();
- if (err)
+ switch (err) {
+ case 0:
+ vgic_present = true;
+ break;
+ case -ENODEV:
+ case -ENXIO:
+ vgic_present = false;
+ break;
+ default:
goto out_free_context;
+ }
/*
* Init HYP architected timer support
@@ -1127,6 +1155,10 @@ static int init_hyp_mode(void)
kvm_perf_init();
+ /* set size of VMID supported by CPU */
+ kvm_vmid_bits = kvm_get_vmid_bits();
+ kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
kvm_info("Hyp mode initialized successfully\n");
return 0;
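The arm.c side of 16-bit VMID support widens the next-VMID counter from u8
to u32, wraps it explicitly at 1 << kvm_vmid_bits, and composes the VTTBR
from the stage-2 PGD physical address plus the VMID shifted into the field
at bit 48. A standalone sketch of that composition, using userspace types
rather than kernel ones:

    #include <stdint.h>

    #define VTTBR_VMID_SHIFT      48
    #define VTTBR_VMID_MASK(size) ((uint64_t)((1 << (size)) - 1) << VTTBR_VMID_SHIFT)

    /* Build a VTTBR value the way update_vttbr() does above. */
    static uint64_t make_vttbr(uint64_t pgd_phys, uint32_t vmid,
                               unsigned int vmid_bits)
    {
            uint64_t vmid_field = ((uint64_t)vmid << VTTBR_VMID_SHIFT) &
                                  VTTBR_VMID_MASK(vmid_bits);

            return pgd_phys | vmid_field;
    }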
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d6c0052..dc99159 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
return vbar;
}
+/*
+ * Switch to an exception mode, updating both CPSR and SPSR. Follow
+ * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
+ */
+static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
+{
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+
+ *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
+
+ switch (mode) {
+ case FIQ_MODE:
+ *vcpu_cpsr(vcpu) |= PSR_F_BIT;
+ /* Fall through */
+ case ABT_MODE:
+ case IRQ_MODE:
+ *vcpu_cpsr(vcpu) |= PSR_A_BIT;
+ /* Fall through */
+ default:
+ *vcpu_cpsr(vcpu) |= PSR_I_BIT;
+ }
+
+ *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+ if (sctlr & SCTLR_TE)
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+ if (sctlr & SCTLR_EE)
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+ /* Note: These now point to the mode banked copies */
+ *vcpu_spsr(vcpu) = cpsr;
+}
+
/**
* kvm_inject_undefined - inject an undefined exception into the guest
* @vcpu: The VCPU to receive the undefined exception
@@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
*/
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
- unsigned long new_lr_value;
- unsigned long new_spsr_value;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset = 4;
u32 return_offset = (is_thumb) ? 2 : 4;
- new_spsr_value = cpsr;
- new_lr_value = *vcpu_pc(vcpu) - return_offset;
-
- *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
- *vcpu_cpsr(vcpu) |= PSR_I_BIT;
- *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
- if (sctlr & SCTLR_TE)
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
- if (sctlr & SCTLR_EE)
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
- /* Note: These now point to UND banked copies */
- *vcpu_spsr(vcpu) = cpsr;
- *vcpu_reg(vcpu, 14) = new_lr_value;
+ kvm_update_psr(vcpu, UND_MODE);
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
- unsigned long new_lr_value;
- unsigned long new_spsr_value;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
u32 return_offset = (is_thumb) ? 4 : 0;
bool is_lpae;
- new_spsr_value = cpsr;
- new_lr_value = *vcpu_pc(vcpu) + return_offset;
-
- *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
- *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
- *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
- if (sctlr & SCTLR_TE)
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
- if (sctlr & SCTLR_EE)
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
- /* Note: These now point to ABT banked copies */
- *vcpu_spsr(vcpu) = cpsr;
- *vcpu_reg(vcpu, 14) = new_lr_value;
+ kvm_update_psr(vcpu, ABT_MODE);
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
if (is_pabt)
vect_offset = 12;
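The emulate.c rewrite deduplicates the exception-entry CPSR fixups from
kvm_inject_undefined() and inject_abt() into kvm_update_psr(), following
AArch32.EnterMode() from the ARMv8 ARM. The switch relies on deliberate
fall-through so the interrupt-mask bits accumulate: FIQ entry masks FIQs,
asynchronous aborts and IRQs; ABT/IRQ entry masks aborts and IRQs; every
other mode masks only IRQs. A minimal standalone sketch of that
accumulation (the PSR bit positions match the ARM architecture):

    #include <stdint.h>
    #include <stdio.h>

    #define PSR_F_BIT (1U << 6)   /* FIQ mask */
    #define PSR_I_BIT (1U << 7)   /* IRQ mask */
    #define PSR_A_BIT (1U << 8)   /* asynchronous abort mask */

    enum { FIQ_MODE = 0x11, IRQ_MODE = 0x12, ABT_MODE = 0x17, UND_MODE = 0x1b };

    static uint32_t mask_bits_for(unsigned int mode)
    {
            uint32_t bits = 0;

            switch (mode) {
            case FIQ_MODE:
                    bits |= PSR_F_BIT;
                    /* fall through */
            case ABT_MODE:
            case IRQ_MODE:
                    bits |= PSR_A_BIT;
                    /* fall through */
            default:
                    bits |= PSR_I_BIT;
            }
            return bits;
    }

    int main(void)
    {
            printf("UND %#x, IRQ %#x, FIQ %#x\n",
                   mask_bits_for(UND_MODE), mask_bits_for(IRQ_MODE),
                   mask_bits_for(FIQ_MODE));
            return 0;   /* prints: UND 0x80, IRQ 0x180, FIQ 0x1c0 */
    }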
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 96e935b..5fa69d7 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -33,6 +33,12 @@
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
+ VCPU_STAT(hvc_exit_stat),
+ VCPU_STAT(wfe_exit_stat),
+ VCPU_STAT(wfi_exit_stat),
+ VCPU_STAT(mmio_exit_user),
+ VCPU_STAT(mmio_exit_kernel),
+ VCPU_STAT(exits),
{ NULL }
};
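Adding the counters to debugfs_entries[] is all it takes to expose them
through KVM's stats debugfs interface; the VCPU_STAT() helper above turns
each field name into a name/offset pair. (One wrinkle visible in the
kvm_host.h hunk: hvc_exit_stat is u32 while its siblings are u64.) A
standalone sketch of the name/offset-table pattern, with simplified
stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    struct vcpu_stat { unsigned long wfe_exit_stat, wfi_exit_stat, exits; };
    struct vcpu { struct vcpu_stat stat; };

    #define VCPU_STAT(x) { #x, offsetof(struct vcpu, stat.x) }

    static const struct { const char *name; size_t offset; } entries[] = {
            VCPU_STAT(wfe_exit_stat),
            VCPU_STAT(wfi_exit_stat),
            VCPU_STAT(exits),
            { NULL, 0 }
    };

    int main(void)
    {
            struct vcpu v = { .stat = { 3, 5, 8 } };
            int i;

            /* Generic code can read any counter knowing only its offset. */
            for (i = 0; entries[i].name; i++)
                    printf("%s = %lu\n", entries[i].name,
                           *(unsigned long *)((char *)&v + entries[i].offset));
            return 0;
    }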
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 95f12b2..3ede90d 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -42,6 +42,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
+ vcpu->stat.hvc_exit_stat++;
ret = kvm_psci_call(vcpu);
if (ret < 0) {
@@ -89,9 +90,11 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
trace_kvm_wfx(*vcpu_pc(vcpu), true);
+ vcpu->stat.wfe_exit_stat++;
kvm_vcpu_on_spin(vcpu);
} else {
trace_kvm_wfx(*vcpu_pc(vcpu), false);
+ vcpu->stat.wfi_exit_stat++;
kvm_vcpu_block(vcpu);
}
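kvm_handle_wfx() tells the two trapped instructions apart via bit 0 of the
HSR ISS (HSR_WFI_IS_WFE from kvm_arm.h): a WFE usually means the vCPU is
spinning on a lock, so it yields with kvm_vcpu_on_spin(), while a WFI
means the vCPU is idle, so it blocks. A standalone decode sketch; the
exception-class value HSR_EC_WFI = 0x01 is taken from the HSR_EC_* table
in kvm_arm.h, which this hunk does not show:

    #include <stdbool.h>
    #include <stdint.h>

    #define HSR_EC_SHIFT    26
    #define HSR_EC_WFI      0x01
    #define HSR_WFI_IS_WFE  (1UL << 0)

    static bool trapped_wfe(uint32_t hsr)
    {
            uint32_t ec = hsr >> HSR_EC_SHIFT;  /* EC is the top 6 bits */

            return ec == HSR_EC_WFI && (hsr & HSR_WFI_IS_WFE);
    }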
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 3a10c9f..7f33b20 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -210,8 +210,11 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (!ret) {
/* We handled the access successfully in the kernel. */
+ vcpu->stat.mmio_exit_kernel++;
kvm_handle_mmio_return(vcpu, run);
return 1;
+ } else {
+ vcpu->stat.mmio_exit_user++;
}
run->exit_reason = KVM_EXIT_MMIO;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 61d96a6..22f7fa0 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -656,9 +656,9 @@ static void *kvm_alloc_hwpgd(void)
* kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
* @kvm: The KVM struct pointer for the VM.
*
- * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
- * support either full 40-bit input addresses or limited to 32-bit input
- * addresses). Clears the allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
*
* Note we don't need locking here as this is only called when the VM is
* created, which can only be done once.