Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 22
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 4
-rw-r--r--  arch/arm/kvm/Kconfig | 30
-rw-r--r--  arch/arm/kvm/Makefile | 12
-rw-r--r--  arch/arm/kvm/arm.c | 45
-rw-r--r--  arch/arm/kvm/guest.c | 18
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 8
-rw-r--r--  arch/arm/kvm/mmio.c | 64
-rw-r--r--  arch/arm/kvm/mmu.c | 134
-rw-r--r--  arch/arm/kvm/trace.h | 48
-rw-r--r--  arch/arm64/include/asm/esr.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 22
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/arm64/kvm/Kconfig | 18
-rw-r--r--  arch/arm64/kvm/Makefile | 20
-rw-r--r--  arch/mips/include/asm/asmmacro-32.h | 128
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 218
-rw-r--r--  arch/mips/include/asm/fpu.h | 20
-rw-r--r--  arch/mips/include/asm/kdebug.h | 3
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 125
-rw-r--r--  arch/mips/include/asm/processor.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 164
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 105
-rw-r--r--  arch/mips/kernel/genex.S | 15
-rw-r--r--  arch/mips/kernel/ptrace.c | 30
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 2
-rw-r--r--  arch/mips/kernel/traps.c | 33
-rw-r--r--  arch/mips/kvm/Makefile | 8
-rw-r--r--  arch/mips/kvm/emulate.c | 332
-rw-r--r--  arch/mips/kvm/fpu.S | 122
-rw-r--r--  arch/mips/kvm/locore.S | 38
-rw-r--r--  arch/mips/kvm/mips.c | 472
-rw-r--r--  arch/mips/kvm/msa.S | 161
-rw-r--r--  arch/mips/kvm/stats.c | 4
-rw-r--r--  arch/mips/kvm/tlb.c | 6
-rw-r--r--  arch/mips/kvm/trap_emul.c | 199
-rw-r--r--  arch/powerpc/kvm/mpic.c | 17
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 46
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/sie.h | 4
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 1
-rw-r--r--  arch/s390/kvm/diag.c | 6
-rw-r--r--  arch/s390/kvm/gaccess.c | 296
-rw-r--r--  arch/s390/kvm/gaccess.h | 21
-rw-r--r--  arch/s390/kvm/guestdbg.c | 8
-rw-r--r--  arch/s390/kvm/intercept.c | 5
-rw-r--r--  arch/s390/kvm/interrupt.c | 1101
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 398
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 51
-rw-r--r--  arch/s390/kvm/priv.c | 144
-rw-r--r--  arch/s390/kvm/sigp.c | 7
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 28
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 2
-rw-r--r--  arch/x86/include/asm/pvclock.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/vmx.h | 1
-rw-r--r--  arch/x86/kernel/pvclock.c | 44
-rw-r--r--  arch/x86/kvm/Makefile | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 33
-rw-r--r--  arch/x86/kvm/cpuid.h | 8
-rw-r--r--  arch/x86/kvm/emulate.c | 193
-rw-r--r--  arch/x86/kvm/i8254.c | 14
-rw-r--r--  arch/x86/kvm/i8254.h | 2
-rw-r--r--  arch/x86/kvm/i8259.c | 12
-rw-r--r--  arch/x86/kvm/ioapic.c | 22
-rw-r--r--  arch/x86/kvm/ioapic.h | 11
-rw-r--r--  arch/x86/kvm/irq.h | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 147
-rw-r--r--  arch/x86/kvm/lapic.h | 17
-rw-r--r--  arch/x86/kvm/mmu.c | 73
-rw-r--r--  arch/x86/kvm/pmu.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 43
-rw-r--r--  arch/x86/kvm/vmx.c | 146
-rw-r--r--  arch/x86/kvm/x86.c | 157
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 34
80 files changed, 4250 insertions, 1528 deletions
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 816db0b..d995821 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -185,6 +185,7 @@
#define HSR_COND (0xfU << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
+#define FSC_ACCESS (0x08)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 41008cd..d71607c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -27,6 +27,8 @@
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2..d8e90c8 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode {
bool sign_extend;
};
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
- void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 0db25bc..2499867 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 2d2d608..488eaac 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -190,7 +190,6 @@ int main(void)
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -200,14 +199,11 @@ int main(void)
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
-#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
-#endif
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
-#endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif
return 0;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 338ace7..f1f79d1 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
+ depends on MMU && OF
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -26,10 +27,12 @@ config KVM
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
- depends on ARM_VIRT_EXT && ARM_LPAE
+ select MMU_NOTIFIER
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQFD
+ depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
- Support hosting virtualized guest machines. You will also
- need to select one or more of the processor modules below.
+ Support hosting virtualized guest machines.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
@@ -37,10 +40,7 @@ config KVM
If unsure, say N.
config KVM_ARM_HOST
- bool "KVM host support for ARM cpus."
- depends on KVM
- depends on MMU
- select MMU_NOTIFIER
+ bool
---help---
Provides host support for ARM processors.
@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
-config KVM_ARM_VGIC
- bool "KVM support for Virtual GIC"
- depends on KVM_ARM_HOST && OF
- select HAVE_KVM_IRQCHIP
- default y
- ---help---
- Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
- bool "KVM support for Architected Timers"
- depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
- select HAVE_KVM_IRQCHIP
- default y
- ---help---
- Adds support for the Architected Timers in virtual machines
-
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 443b8be..139e46c 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
-ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
+ccflags-y += -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.
@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
KVM := ../../../virt/kvm
-kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+obj-y += $(KVM)/arm/vgic.o
+obj-y += $(KVM)/arm/vgic-v2.o
+obj-y += $(KVM)/arm/vgic-v2-emul.o
+obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5560f74..6f53645 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-static bool vgic_present;
-
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
- r = vgic_present;
- break;
+ case KVM_CAP_IRQFD:
+ case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PSCI:
case KVM_CAP_ARM_PSCI_0_2:
case KVM_CAP_READONLY_MEM:
+ case KVM_CAP_MP_STATE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return kvm_timer_should_fire(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ if (vcpu->arch.pause)
+ mp_state->mp_state = KVM_MP_STATE_STOPPED;
+ else
+ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ switch (mp_state->mp_state) {
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.pause = false;
+ break;
+ case KVM_MP_STATE_STOPPED:
+ vcpu->arch.pause = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return 0;
}
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return vgic_initialized(kvm);
+}
+
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2:
- if (!vgic_present)
- return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
- if (vgic_present)
- return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
- else
- return -ENXIO;
+ return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
}
case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr;
@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void)
if (err)
goto out_free_context;
-#ifdef CONFIG_KVM_ARM_VGIC
- vgic_present = true;
-#endif
-
/*
* Init HYP architected timer support
*/
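
The kvm_arch_vcpu_ioctl_get_mpstate()/kvm_arch_vcpu_ioctl_set_mpstate() handlers added above map the vcpu's pause flag onto the generic KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls, now advertised through KVM_CAP_MP_STATE. A minimal userspace sketch of driving this interface, assuming vcpu_fd came from a prior KVM_CREATE_VCPU call (the helper name is illustrative, not part of any API):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Stop or restart a vcpu via the new ARM KVM_SET_MP_STATE support. */
static int vcpu_set_runnable(int vcpu_fd, int runnable)
{
        struct kvm_mp_state mp = {
                .mp_state = runnable ? KVM_MP_STATE_RUNNABLE
                                     : KVM_MP_STATE_STOPPED,
        };

        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}
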
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 384bab6..d503fbb 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-#ifndef CONFIG_KVM_ARM_TIMER
-
-#define NUM_TIMER_REGS 0
-
-static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- return 0;
-}
-
-static bool is_timer_reg(u64 index)
-{
- return false;
-}
-
-#else
-
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return 0;
}
-#endif
-
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 14d4883..35e4a3a 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0
* Assumes vcpu pointer in vcpu reg
*/
.macro save_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1
bne 1b
2:
-#endif
.endm
/*
@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 )
* Assumes vcpu pointer in vcpu reg
*/
.macro restore_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1
bne 1b
2:
-#endif
.endm
#define CNTHCTL_PL1PCTEN (1 << 0)
@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 )
* Clobbers r2-r5
*/
.macro save_timer_state
-#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
-#endif
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 )
bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
-#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 )
and r2, r2, #3
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1:
-#endif
.endm
.equ vmentry, 0
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0..974b1c6 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 0;
}
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_exit_mmio *mmio)
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
unsigned long rt;
- int len;
- bool is_write, sign_extend;
+ int access_size;
+ bool sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
/* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return 1;
}
- len = kvm_vcpu_dabt_get_as(vcpu);
- if (unlikely(len < 0))
- return len;
+ access_size = kvm_vcpu_dabt_get_as(vcpu);
+ if (unlikely(access_size < 0))
+ return access_size;
- is_write = kvm_vcpu_dabt_iswrite(vcpu);
+ *is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);
- mmio->is_write = is_write;
- mmio->phys_addr = fault_ipa;
- mmio->len = len;
+ *len = access_size;
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
- struct kvm_exit_mmio mmio;
unsigned long data;
unsigned long rt;
int ret;
+ bool is_write;
+ int len;
+ u8 data_buf[8];
/*
- * Prepare MMIO operation. First stash it in a private
- * structure that we can use for in-kernel emulation. If the
- * kernel can't handle it, copy it into run->mmio and let user
- * space do its magic.
+ * Prepare MMIO operation. First decode the syndrome data we get
+ * from the CPU. Then try if some in-kernel emulation feels
+ * responsible, otherwise let user space do its magic.
*/
-
if (kvm_vcpu_dabt_isvalid(vcpu)) {
- ret = decode_hsr(vcpu, fault_ipa, &mmio);
+ ret = decode_hsr(vcpu, &is_write, &len);
if (ret)
return ret;
} else {
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt;
- if (mmio.is_write) {
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
- mmio.len);
+ if (is_write) {
+ data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+ mmio_write_buf(data_buf, len, data);
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
- fault_ipa, data);
- mmio_write_buf(mmio.data, mmio.len, data);
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ data_buf);
} else {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
fault_ipa, 0);
+
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ data_buf);
}
- if (vgic_handle_mmio(vcpu, run, &mmio))
+ /* Now prepare kvm_run for the potential return to userland. */
+ run->mmio.is_write = is_write;
+ run->mmio.phys_addr = fault_ipa;
+ run->mmio.len = len;
+ memcpy(run->mmio.data, data_buf, len);
+
+ if (!ret) {
+ /* We handled the access successfully in the kernel. */
+ kvm_handle_mmio_return(vcpu, run);
return 1;
+ }
- kvm_prepare_mmio(run, &mmio);
+ run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
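
With the io_mem_abort() rework above, every decoded MMIO access is first offered to in-kernel devices on KVM_MMIO_BUS via kvm_io_bus_write()/kvm_io_bus_read(); only when no device claims it does the exit fall through to userspace, with run->mmio already filled in and exit_reason set to KVM_EXIT_MMIO. The userspace side is unchanged; a hedged sketch, where device_mmio_read()/device_mmio_write() stand in for a VMM's own device-model hooks:

#include <linux/kvm.h>

/* Hypothetical VMM helpers, not part of any real API. */
extern void device_mmio_read(__u64 addr, void *data, __u32 len);
extern void device_mmio_write(__u64 addr, const void *data, __u32 len);

static void handle_exit_mmio(struct kvm_run *run)
{
        if (run->mmio.is_write)
                device_mmio_write(run->mmio.phys_addr, run->mmio.data,
                                  run->mmio.len);
        else
                device_mmio_read(run->mmio.phys_addr, run->mmio.data,
                                 run->mmio.len);
        /* The next KVM_RUN picks the result up in kvm_handle_mmio_return(). */
}
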
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5656d79..15b050d 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
out_unlock:
spin_unlock(&kvm->mmu_lock);
+ kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret;
}
+/*
+ * Resolve the access fault by making the page young again.
+ * Note that because the faulting entry is guaranteed not to be
+ * cached in the TLB, we don't need to invalidate anything.
+ */
+static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+ pfn_t pfn;
+ bool pfn_valid = false;
+
+ trace_kvm_access_fault(fault_ipa);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+
+ pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ goto out;
+
+ if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
+ *pmd = pmd_mkyoung(*pmd);
+ pfn = pmd_pfn(*pmd);
+ pfn_valid = true;
+ goto out;
+ }
+
+ pte = pte_offset_kernel(pmd, fault_ipa);
+ if (pte_none(*pte)) /* Nothing there either */
+ goto out;
+
+ *pte = pte_mkyoung(*pte); /* Just a page... */
+ pfn = pte_pfn(*pte);
+ pfn_valid = true;
+out:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ if (pfn_valid)
+ kvm_set_pfn_accessed(pfn);
+}
+
/**
* kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer
@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Check the stage-2 fault is trans. fault or write fault */
fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
- if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+ if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+ fault_status != FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Userspace should not be able to register out-of-bounds IPAs */
VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+ if (fault_status == FSC_ACCESS) {
+ handle_access_fault(vcpu, fault_ipa);
+ ret = 1;
+ goto out_unlock;
+ }
+
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
if (ret == 0)
ret = 1;
@@ -1408,15 +1456,16 @@ out_unlock:
return ret;
}
-static void handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- void (*handler)(struct kvm *kvm,
- gpa_t gpa, void *data),
- void *data)
+static int handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ int (*handler)(struct kvm *kvm,
+ gpa_t gpa, void *data),
+ void *data)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ int ret = 0;
slots = kvm_memslots(kvm);
@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,
for (; gfn < gfn_end; ++gfn) {
gpa_t gpa = gfn << PAGE_SHIFT;
- handler(kvm, gpa, data);
+ ret |= handler(kvm, gpa, data);
}
}
+
+ return ret;
}
-static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+ return 0;
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
return 0;
}
-static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
pte_t *pte = (pte_t *)data;
@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
* through this calling path.
*/
stage2_set_pte(kvm, NULL, gpa, pte, 0);
+ return 0;
}
@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pmd = stage2_get_pmd(kvm, NULL, gpa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ return 0;
+
+ if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
+ if (pmd_young(*pmd)) {
+ *pmd = pmd_mkold(*pmd);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ pte = pte_offset_kernel(pmd, gpa);
+ if (pte_none(*pte))
+ return 0;
+
+ if (pte_young(*pte)) {
+ *pte = pte_mkold(*pte); /* Just a page... */
+ return 1;
+ }
+
+ return 0;
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pmd = stage2_get_pmd(kvm, NULL, gpa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ return 0;
+
+ if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */
+ return pmd_young(*pmd);
+
+ pte = pte_offset_kernel(pmd, gpa);
+ if (!pte_none(*pte)) /* Just a page... */
+ return pte_young(*pte);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ trace_kvm_age_hva(start, end);
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_test_age_hva(hva);
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 6817664..0ec3539 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
__entry->hxfar, __entry->vcpu_pc)
);
+TRACE_EVENT(kvm_access_fault,
+ TP_PROTO(unsigned long ipa),
+ TP_ARGS(ipa),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ipa )
+ ),
+
+ TP_fast_assign(
+ __entry->ipa = ipa;
+ ),
+
+ TP_printk("IPA: %lx", __entry->ipa)
+);
+
TRACE_EVENT(kvm_irq_line,
TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
TP_ARGS(type, vcpu_idx, irq_num, level),
@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva,
TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);
+TRACE_EVENT(kvm_age_hva,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+);
+
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 92bbae3..7052245 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -90,6 +90,7 @@
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
#define ESR_ELx_CV (UL(1) << 24)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 54bb4ba..ac6fafb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -188,6 +188,7 @@
/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_ACCESS ESR_ELx_FSC_ACCESS
#define FSC_PERM ESR_ELx_FSC_PERM
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8ac3c70f..f0f58c9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -28,6 +28,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 9f52beb..889c908 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -31,28 +31,6 @@ struct kvm_decode {
bool sign_extend;
};
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
- void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3ef77a4..c154c0b7 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c8..5105e29 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
+ depends on OF
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select ANON_INODES
@@ -25,10 +26,10 @@ config KVM
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
- select KVM_ARM_VGIC
- select KVM_ARM_TIMER
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQFD
---help---
Support hosting virtualized guest machines.
@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
-config KVM_ARM_VGIC
- bool
- depends on KVM_ARM_HOST && OF
- select HAVE_KVM_IRQCHIP
- ---help---
- Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
- bool
- depends on KVM_ARM_VGIC
- ---help---
- Adds support for the Architected Timers in virtual machines.
-
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 4e6e09e..d5904f8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module
#
-ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
+ccflags-y += -Iarch/arm64/kvm
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
-kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index cdac7b3..8038647 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -16,38 +16,38 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- swc1 $f0, THREAD_FPR0_LS64(\thread)
- swc1 $f1, THREAD_FPR1_LS64(\thread)
- swc1 $f2, THREAD_FPR2_LS64(\thread)
- swc1 $f3, THREAD_FPR3_LS64(\thread)
- swc1 $f4, THREAD_FPR4_LS64(\thread)
- swc1 $f5, THREAD_FPR5_LS64(\thread)
- swc1 $f6, THREAD_FPR6_LS64(\thread)
- swc1 $f7, THREAD_FPR7_LS64(\thread)
- swc1 $f8, THREAD_FPR8_LS64(\thread)
- swc1 $f9, THREAD_FPR9_LS64(\thread)
- swc1 $f10, THREAD_FPR10_LS64(\thread)
- swc1 $f11, THREAD_FPR11_LS64(\thread)
- swc1 $f12, THREAD_FPR12_LS64(\thread)
- swc1 $f13, THREAD_FPR13_LS64(\thread)
- swc1 $f14, THREAD_FPR14_LS64(\thread)
- swc1 $f15, THREAD_FPR15_LS64(\thread)
- swc1 $f16, THREAD_FPR16_LS64(\thread)
- swc1 $f17, THREAD_FPR17_LS64(\thread)
- swc1 $f18, THREAD_FPR18_LS64(\thread)
- swc1 $f19, THREAD_FPR19_LS64(\thread)
- swc1 $f20, THREAD_FPR20_LS64(\thread)
- swc1 $f21, THREAD_FPR21_LS64(\thread)
- swc1 $f22, THREAD_FPR22_LS64(\thread)
- swc1 $f23, THREAD_FPR23_LS64(\thread)
- swc1 $f24, THREAD_FPR24_LS64(\thread)
- swc1 $f25, THREAD_FPR25_LS64(\thread)
- swc1 $f26, THREAD_FPR26_LS64(\thread)
- swc1 $f27, THREAD_FPR27_LS64(\thread)
- swc1 $f28, THREAD_FPR28_LS64(\thread)
- swc1 $f29, THREAD_FPR29_LS64(\thread)
- swc1 $f30, THREAD_FPR30_LS64(\thread)
- swc1 $f31, THREAD_FPR31_LS64(\thread)
+ swc1 $f0, THREAD_FPR0(\thread)
+ swc1 $f1, THREAD_FPR1(\thread)
+ swc1 $f2, THREAD_FPR2(\thread)
+ swc1 $f3, THREAD_FPR3(\thread)
+ swc1 $f4, THREAD_FPR4(\thread)
+ swc1 $f5, THREAD_FPR5(\thread)
+ swc1 $f6, THREAD_FPR6(\thread)
+ swc1 $f7, THREAD_FPR7(\thread)
+ swc1 $f8, THREAD_FPR8(\thread)
+ swc1 $f9, THREAD_FPR9(\thread)
+ swc1 $f10, THREAD_FPR10(\thread)
+ swc1 $f11, THREAD_FPR11(\thread)
+ swc1 $f12, THREAD_FPR12(\thread)
+ swc1 $f13, THREAD_FPR13(\thread)
+ swc1 $f14, THREAD_FPR14(\thread)
+ swc1 $f15, THREAD_FPR15(\thread)
+ swc1 $f16, THREAD_FPR16(\thread)
+ swc1 $f17, THREAD_FPR17(\thread)
+ swc1 $f18, THREAD_FPR18(\thread)
+ swc1 $f19, THREAD_FPR19(\thread)
+ swc1 $f20, THREAD_FPR20(\thread)
+ swc1 $f21, THREAD_FPR21(\thread)
+ swc1 $f22, THREAD_FPR22(\thread)
+ swc1 $f23, THREAD_FPR23(\thread)
+ swc1 $f24, THREAD_FPR24(\thread)
+ swc1 $f25, THREAD_FPR25(\thread)
+ swc1 $f26, THREAD_FPR26(\thread)
+ swc1 $f27, THREAD_FPR27(\thread)
+ swc1 $f28, THREAD_FPR28(\thread)
+ swc1 $f29, THREAD_FPR29(\thread)
+ swc1 $f30, THREAD_FPR30(\thread)
+ swc1 $f31, THREAD_FPR31(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -56,38 +56,38 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- lwc1 $f0, THREAD_FPR0_LS64(\thread)
- lwc1 $f1, THREAD_FPR1_LS64(\thread)
- lwc1 $f2, THREAD_FPR2_LS64(\thread)
- lwc1 $f3, THREAD_FPR3_LS64(\thread)
- lwc1 $f4, THREAD_FPR4_LS64(\thread)
- lwc1 $f5, THREAD_FPR5_LS64(\thread)
- lwc1 $f6, THREAD_FPR6_LS64(\thread)
- lwc1 $f7, THREAD_FPR7_LS64(\thread)
- lwc1 $f8, THREAD_FPR8_LS64(\thread)
- lwc1 $f9, THREAD_FPR9_LS64(\thread)
- lwc1 $f10, THREAD_FPR10_LS64(\thread)
- lwc1 $f11, THREAD_FPR11_LS64(\thread)
- lwc1 $f12, THREAD_FPR12_LS64(\thread)
- lwc1 $f13, THREAD_FPR13_LS64(\thread)
- lwc1 $f14, THREAD_FPR14_LS64(\thread)
- lwc1 $f15, THREAD_FPR15_LS64(\thread)
- lwc1 $f16, THREAD_FPR16_LS64(\thread)
- lwc1 $f17, THREAD_FPR17_LS64(\thread)
- lwc1 $f18, THREAD_FPR18_LS64(\thread)
- lwc1 $f19, THREAD_FPR19_LS64(\thread)
- lwc1 $f20, THREAD_FPR20_LS64(\thread)
- lwc1 $f21, THREAD_FPR21_LS64(\thread)
- lwc1 $f22, THREAD_FPR22_LS64(\thread)
- lwc1 $f23, THREAD_FPR23_LS64(\thread)
- lwc1 $f24, THREAD_FPR24_LS64(\thread)
- lwc1 $f25, THREAD_FPR25_LS64(\thread)
- lwc1 $f26, THREAD_FPR26_LS64(\thread)
- lwc1 $f27, THREAD_FPR27_LS64(\thread)
- lwc1 $f28, THREAD_FPR28_LS64(\thread)
- lwc1 $f29, THREAD_FPR29_LS64(\thread)
- lwc1 $f30, THREAD_FPR30_LS64(\thread)
- lwc1 $f31, THREAD_FPR31_LS64(\thread)
+ lwc1 $f0, THREAD_FPR0(\thread)
+ lwc1 $f1, THREAD_FPR1(\thread)
+ lwc1 $f2, THREAD_FPR2(\thread)
+ lwc1 $f3, THREAD_FPR3(\thread)
+ lwc1 $f4, THREAD_FPR4(\thread)
+ lwc1 $f5, THREAD_FPR5(\thread)
+ lwc1 $f6, THREAD_FPR6(\thread)
+ lwc1 $f7, THREAD_FPR7(\thread)
+ lwc1 $f8, THREAD_FPR8(\thread)
+ lwc1 $f9, THREAD_FPR9(\thread)
+ lwc1 $f10, THREAD_FPR10(\thread)
+ lwc1 $f11, THREAD_FPR11(\thread)
+ lwc1 $f12, THREAD_FPR12(\thread)
+ lwc1 $f13, THREAD_FPR13(\thread)
+ lwc1 $f14, THREAD_FPR14(\thread)
+ lwc1 $f15, THREAD_FPR15(\thread)
+ lwc1 $f16, THREAD_FPR16(\thread)
+ lwc1 $f17, THREAD_FPR17(\thread)
+ lwc1 $f18, THREAD_FPR18(\thread)
+ lwc1 $f19, THREAD_FPR19(\thread)
+ lwc1 $f20, THREAD_FPR20(\thread)
+ lwc1 $f21, THREAD_FPR21(\thread)
+ lwc1 $f22, THREAD_FPR22(\thread)
+ lwc1 $f23, THREAD_FPR23(\thread)
+ lwc1 $f24, THREAD_FPR24(\thread)
+ lwc1 $f25, THREAD_FPR25(\thread)
+ lwc1 $f26, THREAD_FPR26(\thread)
+ lwc1 $f27, THREAD_FPR27(\thread)
+ lwc1 $f28, THREAD_FPR28(\thread)
+ lwc1 $f29, THREAD_FPR29(\thread)
+ lwc1 $f30, THREAD_FPR30(\thread)
+ lwc1 $f31, THREAD_FPR31(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 0cae459..6156ac8 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -60,22 +60,22 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- sdc1 $f0, THREAD_FPR0_LS64(\thread)
- sdc1 $f2, THREAD_FPR2_LS64(\thread)
- sdc1 $f4, THREAD_FPR4_LS64(\thread)
- sdc1 $f6, THREAD_FPR6_LS64(\thread)
- sdc1 $f8, THREAD_FPR8_LS64(\thread)
- sdc1 $f10, THREAD_FPR10_LS64(\thread)
- sdc1 $f12, THREAD_FPR12_LS64(\thread)
- sdc1 $f14, THREAD_FPR14_LS64(\thread)
- sdc1 $f16, THREAD_FPR16_LS64(\thread)
- sdc1 $f18, THREAD_FPR18_LS64(\thread)
- sdc1 $f20, THREAD_FPR20_LS64(\thread)
- sdc1 $f22, THREAD_FPR22_LS64(\thread)
- sdc1 $f24, THREAD_FPR24_LS64(\thread)
- sdc1 $f26, THREAD_FPR26_LS64(\thread)
- sdc1 $f28, THREAD_FPR28_LS64(\thread)
- sdc1 $f30, THREAD_FPR30_LS64(\thread)
+ sdc1 $f0, THREAD_FPR0(\thread)
+ sdc1 $f2, THREAD_FPR2(\thread)
+ sdc1 $f4, THREAD_FPR4(\thread)
+ sdc1 $f6, THREAD_FPR6(\thread)
+ sdc1 $f8, THREAD_FPR8(\thread)
+ sdc1 $f10, THREAD_FPR10(\thread)
+ sdc1 $f12, THREAD_FPR12(\thread)
+ sdc1 $f14, THREAD_FPR14(\thread)
+ sdc1 $f16, THREAD_FPR16(\thread)
+ sdc1 $f18, THREAD_FPR18(\thread)
+ sdc1 $f20, THREAD_FPR20(\thread)
+ sdc1 $f22, THREAD_FPR22(\thread)
+ sdc1 $f24, THREAD_FPR24(\thread)
+ sdc1 $f26, THREAD_FPR26(\thread)
+ sdc1 $f28, THREAD_FPR28(\thread)
+ sdc1 $f30, THREAD_FPR30(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -84,22 +84,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
- sdc1 $f1, THREAD_FPR1_LS64(\thread)
- sdc1 $f3, THREAD_FPR3_LS64(\thread)
- sdc1 $f5, THREAD_FPR5_LS64(\thread)
- sdc1 $f7, THREAD_FPR7_LS64(\thread)
- sdc1 $f9, THREAD_FPR9_LS64(\thread)
- sdc1 $f11, THREAD_FPR11_LS64(\thread)
- sdc1 $f13, THREAD_FPR13_LS64(\thread)
- sdc1 $f15, THREAD_FPR15_LS64(\thread)
- sdc1 $f17, THREAD_FPR17_LS64(\thread)
- sdc1 $f19, THREAD_FPR19_LS64(\thread)
- sdc1 $f21, THREAD_FPR21_LS64(\thread)
- sdc1 $f23, THREAD_FPR23_LS64(\thread)
- sdc1 $f25, THREAD_FPR25_LS64(\thread)
- sdc1 $f27, THREAD_FPR27_LS64(\thread)
- sdc1 $f29, THREAD_FPR29_LS64(\thread)
- sdc1 $f31, THREAD_FPR31_LS64(\thread)
+ sdc1 $f1, THREAD_FPR1(\thread)
+ sdc1 $f3, THREAD_FPR3(\thread)
+ sdc1 $f5, THREAD_FPR5(\thread)
+ sdc1 $f7, THREAD_FPR7(\thread)
+ sdc1 $f9, THREAD_FPR9(\thread)
+ sdc1 $f11, THREAD_FPR11(\thread)
+ sdc1 $f13, THREAD_FPR13(\thread)
+ sdc1 $f15, THREAD_FPR15(\thread)
+ sdc1 $f17, THREAD_FPR17(\thread)
+ sdc1 $f19, THREAD_FPR19(\thread)
+ sdc1 $f21, THREAD_FPR21(\thread)
+ sdc1 $f23, THREAD_FPR23(\thread)
+ sdc1 $f25, THREAD_FPR25(\thread)
+ sdc1 $f27, THREAD_FPR27(\thread)
+ sdc1 $f29, THREAD_FPR29(\thread)
+ sdc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm
@@ -118,22 +118,22 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- ldc1 $f0, THREAD_FPR0_LS64(\thread)
- ldc1 $f2, THREAD_FPR2_LS64(\thread)
- ldc1 $f4, THREAD_FPR4_LS64(\thread)
- ldc1 $f6, THREAD_FPR6_LS64(\thread)
- ldc1 $f8, THREAD_FPR8_LS64(\thread)
- ldc1 $f10, THREAD_FPR10_LS64(\thread)
- ldc1 $f12, THREAD_FPR12_LS64(\thread)
- ldc1 $f14, THREAD_FPR14_LS64(\thread)
- ldc1 $f16, THREAD_FPR16_LS64(\thread)
- ldc1 $f18, THREAD_FPR18_LS64(\thread)
- ldc1 $f20, THREAD_FPR20_LS64(\thread)
- ldc1 $f22, THREAD_FPR22_LS64(\thread)
- ldc1 $f24, THREAD_FPR24_LS64(\thread)
- ldc1 $f26, THREAD_FPR26_LS64(\thread)
- ldc1 $f28, THREAD_FPR28_LS64(\thread)
- ldc1 $f30, THREAD_FPR30_LS64(\thread)
+ ldc1 $f0, THREAD_FPR0(\thread)
+ ldc1 $f2, THREAD_FPR2(\thread)
+ ldc1 $f4, THREAD_FPR4(\thread)
+ ldc1 $f6, THREAD_FPR6(\thread)
+ ldc1 $f8, THREAD_FPR8(\thread)
+ ldc1 $f10, THREAD_FPR10(\thread)
+ ldc1 $f12, THREAD_FPR12(\thread)
+ ldc1 $f14, THREAD_FPR14(\thread)
+ ldc1 $f16, THREAD_FPR16(\thread)
+ ldc1 $f18, THREAD_FPR18(\thread)
+ ldc1 $f20, THREAD_FPR20(\thread)
+ ldc1 $f22, THREAD_FPR22(\thread)
+ ldc1 $f24, THREAD_FPR24(\thread)
+ ldc1 $f26, THREAD_FPR26(\thread)
+ ldc1 $f28, THREAD_FPR28(\thread)
+ ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.endm
@@ -141,22 +141,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
- ldc1 $f1, THREAD_FPR1_LS64(\thread)
- ldc1 $f3, THREAD_FPR3_LS64(\thread)
- ldc1 $f5, THREAD_FPR5_LS64(\thread)
- ldc1 $f7, THREAD_FPR7_LS64(\thread)
- ldc1 $f9, THREAD_FPR9_LS64(\thread)
- ldc1 $f11, THREAD_FPR11_LS64(\thread)
- ldc1 $f13, THREAD_FPR13_LS64(\thread)
- ldc1 $f15, THREAD_FPR15_LS64(\thread)
- ldc1 $f17, THREAD_FPR17_LS64(\thread)
- ldc1 $f19, THREAD_FPR19_LS64(\thread)
- ldc1 $f21, THREAD_FPR21_LS64(\thread)
- ldc1 $f23, THREAD_FPR23_LS64(\thread)
- ldc1 $f25, THREAD_FPR25_LS64(\thread)
- ldc1 $f27, THREAD_FPR27_LS64(\thread)
- ldc1 $f29, THREAD_FPR29_LS64(\thread)
- ldc1 $f31, THREAD_FPR31_LS64(\thread)
+ ldc1 $f1, THREAD_FPR1(\thread)
+ ldc1 $f3, THREAD_FPR3(\thread)
+ ldc1 $f5, THREAD_FPR5(\thread)
+ ldc1 $f7, THREAD_FPR7(\thread)
+ ldc1 $f9, THREAD_FPR9(\thread)
+ ldc1 $f11, THREAD_FPR11(\thread)
+ ldc1 $f13, THREAD_FPR13(\thread)
+ ldc1 $f15, THREAD_FPR15(\thread)
+ ldc1 $f17, THREAD_FPR17(\thread)
+ ldc1 $f19, THREAD_FPR19(\thread)
+ ldc1 $f21, THREAD_FPR21(\thread)
+ ldc1 $f23, THREAD_FPR23(\thread)
+ ldc1 $f25, THREAD_FPR25(\thread)
+ ldc1 $f27, THREAD_FPR27(\thread)
+ ldc1 $f29, THREAD_FPR29(\thread)
+ ldc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm
@@ -211,6 +211,22 @@
.endm
#ifdef TOOLCHAIN_SUPPORTS_MSA
+ .macro _cfcmsa rd, cs
+ .set push
+ .set mips32r2
+ .set msa
+ cfcmsa \rd, $\cs
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set mips32r2
+ .set msa
+ ctcmsa $\cd, \rs
+ .set pop
+ .endm
+
.macro ld_d wd, off, base
.set push
.set mips32r2
@@ -227,35 +243,35 @@
.set pop
.endm
- .macro copy_u_w rd, ws, n
+ .macro copy_u_w ws, n
.set push
.set mips32r2
.set msa
- copy_u.w \rd, $w\ws[\n]
+ copy_u.w $1, $w\ws[\n]
.set pop
.endm
- .macro copy_u_d rd, ws, n
+ .macro copy_u_d ws, n
.set push
.set mips64r2
.set msa
- copy_u.d \rd, $w\ws[\n]
+ copy_u.d $1, $w\ws[\n]
.set pop
.endm
- .macro insert_w wd, n, rs
+ .macro insert_w wd, n
.set push
.set mips32r2
.set msa
- insert.w $w\wd[\n], \rs
+ insert.w $w\wd[\n], $1
.set pop
.endm
- .macro insert_d wd, n, rs
+ .macro insert_d wd, n
.set push
.set mips64r2
.set msa
- insert.d $w\wd[\n], \rs
+ insert.d $w\wd[\n], $1
.set pop
.endm
#else
@@ -283,7 +299,7 @@
/*
* Temporary until all toolchains in use include MSA support.
*/
- .macro cfcmsa rd, cs
+ .macro _cfcmsa rd, cs
.set push
.set noat
SET_HARDFLOAT
@@ -293,7 +309,7 @@
.set pop
.endm
- .macro ctcmsa cd, rs
+ .macro _ctcmsa cd, rs
.set push
.set noat
SET_HARDFLOAT
@@ -320,44 +336,36 @@
.set pop
.endm
- .macro copy_u_w rd, ws, n
+ .macro copy_u_w ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
- /* move triggers an assembler bug... */
- or \rd, $1, zero
.set pop
.endm
- .macro copy_u_d rd, ws, n
+ .macro copy_u_d ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
- /* move triggers an assembler bug... */
- or \rd, $1, zero
.set pop
.endm
- .macro insert_w wd, n, rs
+ .macro insert_w wd, n
.set push
.set noat
SET_HARDFLOAT
- /* move triggers an assembler bug... */
- or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
- .macro insert_d wd, n, rs
+ .macro insert_d wd, n
.set push
.set noat
SET_HARDFLOAT
- /* move triggers an assembler bug... */
- or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -399,7 +407,7 @@
.set push
.set noat
SET_HARDFLOAT
- cfcmsa $1, MSA_CSR
+ _cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
.set pop
.endm
@@ -409,7 +417,7 @@
.set noat
SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
- ctcmsa MSA_CSR, $1
+ _ctcmsa MSA_CSR, $1
.set pop
ld_d 0, THREAD_FPR0, \thread
ld_d 1, THREAD_FPR1, \thread
@@ -452,9 +460,6 @@
insert_w \wd, 2
insert_w \wd, 3
#endif
- .if 31-\wd
- msa_init_upper (\wd+1)
- .endif
.endm
.macro msa_init_all_upper
@@ -463,6 +468,37 @@
SET_HARDFLOAT
not $1, zero
msa_init_upper 0
+ msa_init_upper 1
+ msa_init_upper 2
+ msa_init_upper 3
+ msa_init_upper 4
+ msa_init_upper 5
+ msa_init_upper 6
+ msa_init_upper 7
+ msa_init_upper 8
+ msa_init_upper 9
+ msa_init_upper 10
+ msa_init_upper 11
+ msa_init_upper 12
+ msa_init_upper 13
+ msa_init_upper 14
+ msa_init_upper 15
+ msa_init_upper 16
+ msa_init_upper 17
+ msa_init_upper 18
+ msa_init_upper 19
+ msa_init_upper 20
+ msa_init_upper 21
+ msa_init_upper 22
+ msa_init_upper 23
+ msa_init_upper 24
+ msa_init_upper 25
+ msa_init_upper 26
+ msa_init_upper 27
+ msa_init_upper 28
+ msa_init_upper 29
+ msa_init_upper 30
+ msa_init_upper 31
.set pop
.endm
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd083e9..b104ad9 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -48,6 +48,12 @@ enum fpu_mode {
#define FPU_FR_MASK 0x1
};
+#define __disable_fpu() \
+do { \
+ clear_c0_status(ST0_CU1); \
+ disable_fpu_hazard(); \
+} while (0)
+
static inline int __enable_fpu(enum fpu_mode mode)
{
int fr;
@@ -86,7 +92,12 @@ fr_common:
enable_fpu_hazard();
/* check FR has the desired value */
- return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
+ if (!!(read_c0_status() & ST0_FR) == !!fr)
+ return 0;
+
+ /* unsupported FR value */
+ __disable_fpu();
+ return SIGFPE;
default:
BUG();
@@ -95,12 +106,6 @@ fr_common:
return SIGFPE;
}
-#define __disable_fpu() \
-do { \
- clear_c0_status(ST0_CU1); \
- disable_fpu_hazard(); \
-} while (0)
-
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int __is_fpu_owner(void)
@@ -170,6 +175,7 @@ static inline void lose_fpu(int save)
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
+ __disable_fpu();
} else if (is_fpu_owner()) {
if (save)
_save_fp(current);
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index 6a9af5f..cba22ab 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -10,7 +10,8 @@ enum die_val {
DIE_RI,
DIE_PAGE_FAULT,
DIE_BREAK,
- DIE_SSTEPBP
+ DIE_SSTEPBP,
+ DIE_MSAFP
};
#endif /* _ASM_MIPS_KDEBUG_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index ac4fc71..4c25823 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -21,10 +21,10 @@
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
- (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \
- (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
@@ -42,11 +42,14 @@
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
@@ -119,6 +122,10 @@ struct kvm_vcpu_stat {
u32 syscall_exits;
u32 resvd_inst_exits;
u32 break_inst_exits;
+ u32 trap_inst_exits;
+ u32 msa_fpe_exits;
+ u32 fpe_exits;
+ u32 msa_disabled_exits;
u32 flush_dcache_exits;
u32 halt_successful_poll;
u32 halt_wakeup;
@@ -138,6 +145,10 @@ enum kvm_mips_exit_types {
SYSCALL_EXITS,
RESVD_INST_EXITS,
BREAK_INST_EXITS,
+ TRAP_INST_EXITS,
+ MSA_FPE_EXITS,
+ FPE_EXITS,
+ MSA_DISABLED_EXITS,
FLUSH_DCACHE_EXITS,
MAX_KVM_MIPS_EXIT_TYPES
};
@@ -206,6 +217,8 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG1_SEL 1
#define MIPS_CP0_CONFIG2_SEL 2
#define MIPS_CP0_CONFIG3_SEL 3
+#define MIPS_CP0_CONFIG4_SEL 4
+#define MIPS_CP0_CONFIG5_SEL 5
/* Config0 register bits */
#define CP0C0_M 31
@@ -262,31 +275,6 @@ struct mips_coproc {
#define CP0C3_SM 1
#define CP0C3_TL 0
-/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
-#define MIPS_CONFIG0 \
- ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
-
-/* Have config2, no coprocessor2 attached, no MDMX support attached,
- no performance counters, watch registers present,
- no code compression, EJTAG present, no FPU, no watch registers */
-#define MIPS_CONFIG1 \
-((1 << CP0C1_M) | \
- (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
- (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
- (0 << CP0C1_FP))
-
-/* Have config3, no tertiary/secondary caches implemented */
-#define MIPS_CONFIG2 \
-((1 << CP0C2_M))
-
-/* No config4, no DSP ASE, no large physaddr (PABITS),
- no external interrupt controller, no vectored interrupts,
- no 1kb pages, no SmartMIPS ASE, no trace logic */
-#define MIPS_CONFIG3 \
-((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
- (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
- (0 << CP0C3_SM) | (0 << CP0C3_TL))
-
/* MMU types, the first four entries have the same layout as the
CP0C0_MT field. */
enum mips_mmu_types {
@@ -321,7 +309,9 @@ enum mips_mmu_types {
*/
#define T_TRAP 13 /* Trap instruction */
#define T_VCEI 14 /* Virtual coherency exception */
+#define T_MSAFPE 14 /* MSA floating point exception */
#define T_FPE 15 /* Floating point exception */
+#define T_MSADIS 21 /* MSA disabled exception */
#define T_WATCH 23 /* Watch address reference */
#define T_VCED 31 /* Virtual coherency data */
@@ -374,6 +364,9 @@ struct kvm_mips_tlb {
long tlb_lo1;
};
+#define KVM_MIPS_FPU_FPU 0x1
+#define KVM_MIPS_FPU_MSA 0x2
+
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
@@ -395,6 +388,8 @@ struct kvm_vcpu_arch {
/* FPU State */
struct mips_fpu_struct fpu;
+ /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
+ unsigned int fpu_inuse;
/* COP0 State */
struct mips_coproc *cop0;
@@ -441,6 +436,9 @@ struct kvm_vcpu_arch {
/* WAIT executed */
int wait;
+
+ u8 fpu_enabled;
+ u8 msa_enabled;
};
@@ -482,11 +480,15 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
+#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
+#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
}
+/* Helpers */
+
+static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+ vcpu->fpu_enabled;
+}
+
+static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_fpu(vcpu) &&
+ kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+}
+
+static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
+ vcpu->msa_enabled;
+}
+
+static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_msa(vcpu) &&
+ kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+}
struct kvm_mips_callbacks {
int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
@@ -578,6 +605,10 @@ struct kvm_mips_callbacks {
int (*handle_syscall)(struct kvm_vcpu *vcpu);
int (*handle_res_inst)(struct kvm_vcpu *vcpu);
int (*handle_break)(struct kvm_vcpu *vcpu);
+ int (*handle_trap)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -596,6 +627,8 @@ struct kvm_mips_callbacks {
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 v);
+ int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
+ int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+/* FPU/MSA context management */
+void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
+void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_own_msa(struct kvm_vcpu *vcpu);
+void kvm_drop_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+
/* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
+
/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b5dcbee..9b3b48e 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -105,7 +105,7 @@ union fpureg {
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx) (idx)
#else
-# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
+# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
#endif
#define BUILD_FPR_ACCESS(width) \
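
A worked example of the FPR_IDX() change above, assuming FPU_REG_WIDTH is 128 when MSA is enabled: the old big-endian macro gave FPR_IDX(64, 0) == 1 and FPR_IDX(32, 0) == 3, i.e. indices counted down from the top of the whole 128-bit vector register, whereas the new macro gives FPR_IDX(64, 0) == 0 and FPR_IDX(32, 0) == 1. The 64-bit FPU view of a register therefore always lives in the least-significant doubleword, and only the two 32-bit words within each doubleword are swapped on big endian, independent of the vector width.
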
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 2c04b6d..6985eb5 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -36,77 +36,85 @@ struct kvm_regs {
/*
* for KVM_GET_FPU and KVM_SET_FPU
- *
- * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
- * are zero filled.
*/
struct kvm_fpu {
- __u64 fpr[32];
- __u32 fir;
- __u32 fccr;
- __u32 fexr;
- __u32 fenr;
- __u32 fcsr;
- __u32 pad;
};
/*
- * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows:
*
- * bits[2..0] - Register 'sel' index.
- * bits[7..3] - Register 'rd' index.
- * bits[15..8] - Must be zero.
- * bits[31..16] - 1 -> CP0 registers.
- * bits[51..32] - Must be zero.
* bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ * bits[15..8] - Must be zero.
+ * bits[7..3] - Register 'rd' index.
+ * bits[2..0] - Register 'sel' index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
*
 * Other register sets may be added in the future.  Each set would
* have its own identifier in bits[31..16].
- *
- * The registers defined in struct kvm_regs are also accessible, the
- * id values for these are below.
*/
-#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
-#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
-#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
-#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
-#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
-#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
-#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
-#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
-#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
-#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
-#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
-#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
-#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
-#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
-#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
-#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
-#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
-#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
-#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
-#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
-#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
-#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
-#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
-#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
-#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
-#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
-#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
-#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
-#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
-#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
-#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
-#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
-
-#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
-#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
-#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
-
-/* KVM specific control registers */
+#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL)
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
/*
* CP0_Count control
@@ -118,8 +126,7 @@ struct kvm_fpu {
* safely without losing time or guest timer interrupts.
* Other: Reserved, do not change.
*/
-#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
/*
@@ -131,15 +138,46 @@ struct kvm_fpu {
* emulated.
* Modifications to times in the future are rejected.
*/
-#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 1)
+#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
/*
* CP0_Count rate in Hz
* Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
* discontinuities in CP0_Count.
*/
-#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 2)
+#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ * bits[15..8] - Register subset (see definitions below).
+ * bits[7..5] - Must be zero.
+ * bits[4..0] - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
+#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
+#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
+
/*
* KVM MIPS specific structures and definitions
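From the VMM side, the ID layout documented above is consumed through the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG ioctls. A minimal sketch, not part of this patch, assuming a MIPS build where <linux/kvm.h> pulls in these asm/kvm.h definitions and a vcpu_fd that has already been created:

/* Sketch only: read the guest FCSR and MSA vector register $w0. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_fcsr_and_w0(int vcpu_fd, uint32_t *fcsr, uint8_t w0[16])
{
	struct kvm_one_reg reg;
	int ret;

	/* 32-bit ID: the kernel copies a u32 to reg.addr */
	reg.id = KVM_REG_MIPS_FCR_CSR;
	reg.addr = (uintptr_t)fcsr;
	ret = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	if (ret < 0)
		return ret;

	/* 128-bit ID: the kernel copies 16 bytes to reg.addr */
	reg.id = KVM_REG_MIPS_VEC_128(0);
	reg.addr = (uintptr_t)w0;
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

The transfer size is taken from the KVM_REG_SIZE_* bits embedded in the ID, so the same two ioctls cover the 32-bit control registers, the 64-bit FPRs and the 128-bit vector registers added here.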
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 750d67a..e59fd7c 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
- /* the least significant 64 bits of each FP register */
- OFFSET(THREAD_FPR0_LS64, task_struct,
- thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR1_LS64, task_struct,
- thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR2_LS64, task_struct,
- thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR3_LS64, task_struct,
- thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR4_LS64, task_struct,
- thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR5_LS64, task_struct,
- thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR6_LS64, task_struct,
- thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR7_LS64, task_struct,
- thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR8_LS64, task_struct,
- thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR9_LS64, task_struct,
- thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR10_LS64, task_struct,
- thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR11_LS64, task_struct,
- thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR12_LS64, task_struct,
- thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR13_LS64, task_struct,
- thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR14_LS64, task_struct,
- thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR15_LS64, task_struct,
- thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR16_LS64, task_struct,
- thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR17_LS64, task_struct,
- thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR18_LS64, task_struct,
- thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR19_LS64, task_struct,
- thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR20_LS64, task_struct,
- thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR21_LS64, task_struct,
- thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR22_LS64, task_struct,
- thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR23_LS64, task_struct,
- thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR24_LS64, task_struct,
- thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR25_LS64, task_struct,
- thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR26_LS64, task_struct,
- thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR27_LS64, task_struct,
- thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR28_LS64, task_struct,
- thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR29_LS64, task_struct,
- thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR30_LS64, task_struct,
- thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR31_LS64, task_struct,
- thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
-
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
@@ -470,6 +404,45 @@ void output_kvm_defines(void)
OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+ BLANK();
+
+ OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
+ OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
+ OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
+ OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
+ OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
+ OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
+ OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
+ OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
+ OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
+ OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
+ OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
+ OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
+ OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
+ OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
+ OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
+ OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
+ OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
+ OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
+ OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
+ OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
+ OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
+ OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
+ OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
+ OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
+ OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
+ OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
+ OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
+ OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
+ OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
+ OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
+ OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
+ OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
+
+ OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+ OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
+ BLANK();
+
OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 2ebaabe..af42e70 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
- li a2, ~(0x3f << 12)
- and a2, a1
- ctc1 a2, fcr31
.set pop
- TRACE_IRQS_ON
- STI
+ CLI
+ TRACE_IRQS_OFF
+ .endm
+
+ .macro __build_clear_msa_fpe
+ _cfcmsa a1, MSA_CSR
+ CLI
+ TRACE_IRQS_OFF
.endm
.macro __build_clear_ade
@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
- BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
+ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 5104528..7da6e32 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -46,6 +46,26 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+static void init_fp_ctx(struct task_struct *target)
+{
+ /* If FP has been used then the target already has context */
+ if (tsk_used_math(target))
+ return;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+
+ /* ...and FCSR zeroed */
+ target->thread.fpu.fcr31 = 0;
+
+ /*
+ * Record that the target has "used" math, such that the context
+ * just initialised, and any modifications made by the caller,
+ * aren't discarded.
+ */
+ set_stopped_child_used_math(target);
+}
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO;
+ init_fp_ctx(child);
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++) {
@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target,
/* XXX fcr31 */
+ init_fp_ctx(target);
+
if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu,
@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
- if (!tsk_used_math(child)) {
- /* FP not yet used */
- memset(&child->thread.fpu, ~0,
- sizeof(child->thread.fpu));
- child->thread.fpu.fcr31 = 0;
- }
+ init_fp_ctx(child);
#ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 676c503..1d88af2 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,6 @@
.endm
.set noreorder
- .set MIPS_ISA_ARCH_LEVEL_RAW
LEAF(_save_fp_context)
.set push
@@ -103,6 +102,7 @@ LEAF(_save_fp_context)
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
.set push
+ .set MIPS_ISA_ARCH_LEVEL_RAW
SET_HARDFLOAT
cfc1 t1, fcr31
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 33984c0..5b4d711 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr)
{
+ /*
+ * We can't allow the emulated instruction to leave any of the cause
+ * bits set in FCSR. If they were then the kernel would take an FP
+ * exception when restoring FP context.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
SIGFPE) == NOTIFY_STOP)
goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ local_irq_enable();
+
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- /*
- * We can't allow the emulated instruction to leave any of
- * the cause bit set in $fcr31.
- */
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ /* If something went wrong, signal */
+ process_fpemu_return(sig, fault_addr);
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
- /* If something went wrong, signal */
- process_fpemu_return(sig, fault_addr);
-
goto out;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -1392,13 +1398,22 @@ out:
exception_exit(prev_state);
}
-asmlinkage void do_msa_fpe(struct pt_regs *regs)
+asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
enum ctx_state prev_state;
prev_state = exception_enter();
+ if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
+ regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear MSACSR.Cause before enabling interrupts */
+ write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
+ local_irq_enable();
+
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE, current);
+out:
exception_exit(prev_state);
}
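Both traps.c hunks above hinge on the same FCSR detail: if a Cause bit whose matching Enable bit is set (or the Unimplemented Operation cause) is still present when FCSR is written back with ctc1, the FP exception is immediately retaken, so the Cause field has to be cleared before interrupts are re-enabled and before the emulator's result is restored to hardware. A small sketch of the masking, illustration only and not part of the patch, using the cause-field mask from asm/mipsregs.h:

/* FPU_CSR_ALL_X covers the six FCSR Cause bits (17:12). */
#define FPU_CSR_ALL_X	0x0003f000

static unsigned int fcsr_without_cause(unsigned int fcr31)
{
	/* With Cause cleared, writing the result back via ctc1 cannot
	 * trap, regardless of which Enable bits are set. */
	return fcr31 & ~FPU_CSR_ALL_X;
}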
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 401fe02..637ebbe 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -1,13 +1,15 @@
# Makefile for KVM support for MIPS
#
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
-kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
interrupt.o stats.o commpage.o \
- dyntrans.o trap_emul.o
+ dyntrans.o trap_emul.o fpu.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index fb3e8df..6230f37 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+/**
+ * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config1 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit FPU to be present if FPU is supported */
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+ mask |= MIPS_CONF1_FP;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config3 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config4 is optional */
+ unsigned int mask = MIPS_CONF_M;
+
+ /* Permit MSA to be present if MSA is supported */
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ mask |= MIPS_CONF3_MSA;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config4 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config5 is optional */
+ return MIPS_CONF_M;
+}
+
+/**
+ * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config5 CP0
+ * register, by the guest itself.
+ */
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit MSAEn changes if MSA supported and enabled */
+ if (kvm_mips_guest_has_msa(&vcpu->arch))
+ mask |= MIPS_CONF5_MSAEN;
+
+ /*
+ * Permit guest FPU mode changes if FPU is enabled and the relevant
+ * feature exists according to FIR register.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ if (cpu_has_fre)
+ mask |= MIPS_CONF5_FRE;
+ /* We don't support UFR or UFE */
+ }
+
+ return mask;
+}
+
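These wrmask helpers pair with a xor/and/xor sequence at the point where the register is written (the Config5 MTC0 handling later in this patch uses exactly this pattern), so that bits outside the mask silently keep their old value. A small illustrative helper, not part of the patch:

/* Bits inside wrmask take the new value; bits outside it are preserved. */
static unsigned int apply_wrmask(unsigned int old_val, unsigned int val,
				 unsigned int wrmask)
{
	unsigned int change = (val ^ old_val) & wrmask;

	return old_val ^ change;
}

For example, with wrmask equal to MIPS_CONF5_MSAEN, a guest write of all-ones to Config5 flips only the MSAEn bit and leaves every other bit at its previous value.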
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
uint32_t cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt]);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
- kvm_write_c0_guest_status(cop0,
- vcpu->arch.gprs[rt]);
+ unsigned int old_val, val, change;
+
+ old_val = kvm_read_c0_guest_status(cop0);
+ val = vcpu->arch.gprs[rt];
+ change = val ^ old_val;
+
+ /* Make sure that the NMI bit is never set */
+ val &= ~ST0_NMI;
+
+ /*
+ * Don't allow CU1 or FR to be set unless FPU
+ * capability enabled and exists in guest
+ * configuration.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ val &= ~(ST0_CU1 | ST0_FR);
+
+ /*
+ * Also don't allow FR to be set if host doesn't
+ * support it.
+ */
+ if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ val &= ~ST0_FR;
+
+
+ /* Handle changes in FPU mode */
+ preempt_disable();
+
+ /*
+ * FPU and Vector register state is made
+ * UNPREDICTABLE by a change of FR, so don't
+ * even bother saving it.
+ */
+ if (change & ST0_FR)
+ kvm_drop_fpu(vcpu);
+
+ /*
+ * If MSA state is already live, it is undefined
+ * how it interacts with FR=0 FPU state, and we
+ * don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later
+ * when CU=1 && FR=1, so play it safe and save
+ * it first.
+ */
+ if (change & ST0_CU1 && !(val & ST0_FR) &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ kvm_lose_fpu(vcpu);
+
/*
- * Make sure that CU1 and NMI bits are
- * never set
+ * Propagate CU1 (FPU enable) changes
+ * immediately if the FPU context is already
+ * loaded. When disabling we leave the context
+ * loaded so it can be quickly enabled again in
+ * the near future.
*/
- kvm_clear_c0_guest_status(cop0,
- (ST0_CU1 | ST0_NMI));
+ if (change & ST0_CU1 &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ change_c0_status(ST0_CU1, val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_status(cop0, val);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mtc0(inst, opc, vcpu);
+ /*
+ * If FPU present, we need CU1/FR bits to take
+ * effect fairly soon.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
+ } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+ unsigned int old_val, val, change, wrmask;
+
+ old_val = kvm_read_c0_guest_config5(cop0);
+ val = vcpu->arch.gprs[rt];
+
+ /* Only a few bits are writable in Config5 */
+ wrmask = kvm_mips_config5_wrmask(vcpu);
+ change = (val ^ old_val) & wrmask;
+ val = old_val ^ change;
+
+
+ /* Handle changes in FPU/MSA modes */
+ preempt_disable();
+
+ /*
+ * Propagate FRE changes immediately if the FPU
+ * context is already loaded.
+ */
+ if (change & MIPS_CONF5_FRE &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ change_c0_config5(MIPS_CONF5_FRE, val);
+
+ /*
+ * Propagate MSAEn changes immediately if the
+ * MSA context is already loaded. When disabling
+ * we leave the context loaded so it can be
+ * quickly enabled again in the near future.
+ */
+ if (change & MIPS_CONF5_MSAEN &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ change_c0_config5(MIPS_CONF5_MSAEN,
+ val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
uint32_t old_cause, new_cause;
@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er;
}
+enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TRAP << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver TRAP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_MSAFPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_FPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver FPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_MSADIS << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSADIS when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
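The four emulators above differ only in the ExcCode written into the guest's Cause register and in the message they log; the shared sequence is: refuse if guest EXL is already set, save EPC, set EXL, mirror the branch-delay bit, install the ExcCode, and vector to the guest's general exception entry at KSEG0 + 0x180. Purely to show that common shape, a hypothetical consolidation (not code from this patch):

/* Hypothetical helper capturing the delivery sequence shared by the
 * trap/MSAFPE/FPE/MSADIS emulators above. */
static enum emulation_result kvm_mips_deliver_guest_exc(struct kvm_vcpu *vcpu,
							unsigned long cause,
							unsigned int exccode)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL)
		return EMULATE_FAIL;

	/* Save the faulting PC and enter guest exception level */
	kvm_write_c0_guest_epc(cop0, arch->pc);
	kvm_set_c0_guest_status(cop0, ST0_EXL);

	/* Record whether the fault hit a branch delay slot */
	if (cause & CAUSEF_BD)
		kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
	else
		kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

	kvm_change_c0_guest_cause(cop0, 0xff, exccode << CAUSEB_EXCCODE);

	/* Vector to the guest's general exception entry point */
	arch->pc = KVM_GUEST_KSEG0 + 0x180;
	return EMULATE_DONE;
}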
/* ll/sc, rdhwr, sync emulation */
#define OPCODE 0xfc000000
@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
case T_SYSCALL:
case T_BREAK:
case T_RES_INST:
+ case T_TRAP:
+ case T_MSAFPE:
+ case T_FPE:
+ case T_MSADIS:
break;
case T_COP_UNUSABLE:
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 0000000..531fbf5
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FPU context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_fpu)
+ .set push
+ .set mips64r2
+ SET_HARDFLOAT
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ sdc1 $f1, VCPU_FPR1(a0)
+ sdc1 $f3, VCPU_FPR3(a0)
+ sdc1 $f5, VCPU_FPR5(a0)
+ sdc1 $f7, VCPU_FPR7(a0)
+ sdc1 $f9, VCPU_FPR9(a0)
+ sdc1 $f11, VCPU_FPR11(a0)
+ sdc1 $f13, VCPU_FPR13(a0)
+ sdc1 $f15, VCPU_FPR15(a0)
+ sdc1 $f17, VCPU_FPR17(a0)
+ sdc1 $f19, VCPU_FPR19(a0)
+ sdc1 $f21, VCPU_FPR21(a0)
+ sdc1 $f23, VCPU_FPR23(a0)
+ sdc1 $f25, VCPU_FPR25(a0)
+ sdc1 $f27, VCPU_FPR27(a0)
+ sdc1 $f29, VCPU_FPR29(a0)
+ sdc1 $f31, VCPU_FPR31(a0)
+1: sdc1 $f0, VCPU_FPR0(a0)
+ sdc1 $f2, VCPU_FPR2(a0)
+ sdc1 $f4, VCPU_FPR4(a0)
+ sdc1 $f6, VCPU_FPR6(a0)
+ sdc1 $f8, VCPU_FPR8(a0)
+ sdc1 $f10, VCPU_FPR10(a0)
+ sdc1 $f12, VCPU_FPR12(a0)
+ sdc1 $f14, VCPU_FPR14(a0)
+ sdc1 $f16, VCPU_FPR16(a0)
+ sdc1 $f18, VCPU_FPR18(a0)
+ sdc1 $f20, VCPU_FPR20(a0)
+ sdc1 $f22, VCPU_FPR22(a0)
+ sdc1 $f24, VCPU_FPR24(a0)
+ sdc1 $f26, VCPU_FPR26(a0)
+ sdc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ sdc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_save_fpu)
+
+LEAF(__kvm_restore_fpu)
+ .set push
+ .set mips64r2
+ SET_HARDFLOAT
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ ldc1 $f1, VCPU_FPR1(a0)
+ ldc1 $f3, VCPU_FPR3(a0)
+ ldc1 $f5, VCPU_FPR5(a0)
+ ldc1 $f7, VCPU_FPR7(a0)
+ ldc1 $f9, VCPU_FPR9(a0)
+ ldc1 $f11, VCPU_FPR11(a0)
+ ldc1 $f13, VCPU_FPR13(a0)
+ ldc1 $f15, VCPU_FPR15(a0)
+ ldc1 $f17, VCPU_FPR17(a0)
+ ldc1 $f19, VCPU_FPR19(a0)
+ ldc1 $f21, VCPU_FPR21(a0)
+ ldc1 $f23, VCPU_FPR23(a0)
+ ldc1 $f25, VCPU_FPR25(a0)
+ ldc1 $f27, VCPU_FPR27(a0)
+ ldc1 $f29, VCPU_FPR29(a0)
+ ldc1 $f31, VCPU_FPR31(a0)
+1: ldc1 $f0, VCPU_FPR0(a0)
+ ldc1 $f2, VCPU_FPR2(a0)
+ ldc1 $f4, VCPU_FPR4(a0)
+ ldc1 $f6, VCPU_FPR6(a0)
+ ldc1 $f8, VCPU_FPR8(a0)
+ ldc1 $f10, VCPU_FPR10(a0)
+ ldc1 $f12, VCPU_FPR12(a0)
+ ldc1 $f14, VCPU_FPR14(a0)
+ ldc1 $f16, VCPU_FPR16(a0)
+ ldc1 $f18, VCPU_FPR18(a0)
+ ldc1 $f20, VCPU_FPR20(a0)
+ ldc1 $f22, VCPU_FPR22(a0)
+ ldc1 $f24, VCPU_FPR24(a0)
+ ldc1 $f26, VCPU_FPR26(a0)
+ ldc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ ldc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_restore_fpu)
+
+LEAF(__kvm_restore_fcsr)
+ .set push
+ SET_HARDFLOAT
+ lw t0, VCPU_FCR31(a0)
+ /*
+ * The ctc1 must stay at this offset in __kvm_restore_fcsr.
+ * See kvm_mips_csr_die_notify() which handles t0 containing a value
+ * which triggers an FP Exception, which must be stepped over and
+ * ignored since the set cause bits must remain there for the guest.
+ */
+ ctc1 t0, fcr31
+ jr ra
+ nop
+ .set pop
+ END(__kvm_restore_fcsr)
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 4a68b17..c567240 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -36,6 +36,8 @@
#define PT_HOST_USERLOCAL PT_EPC
#define CP0_DDATA_LO $28,3
+#define CP0_CONFIG3 $16,3
+#define CP0_CONFIG5 $16,5
#define CP0_EBASE $15,1
#define CP0_INTCTL $12,1
@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
LONG_L k0, VCPU_HOST_EBASE(k1)
mtc0 k0,CP0_EBASE
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
+ * trigger FPE for pending exceptions.
+ */
+ .set at
+ and v1, v0, ST0_CU1
+ beqz v1, 1f
+ nop
+ .set push
+ SET_HARDFLOAT
+ cfc1 t0, fcr31
+ sw t0, VCPU_FCR31(k1)
+ ctc1 zero,fcr31
+ .set pop
+ .set noat
+1:
+
+#ifdef CONFIG_CPU_HAS_MSA
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ mfc0 t0, CP0_CONFIG3
+ ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
+ beqz t0, 1f
+ nop
+ mfc0 t0, CP0_CONFIG5
+ ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
+ beqz t0, 1f
+ nop
+ _cfcmsa t0, MSA_CSR
+ sw t0, VCPU_MSA_CSR(k1)
+ _ctcmsa MSA_CSR, zero
+1:
+#endif
+
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
.set at
and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index c9eccf5..bb68e8d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+ { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
+ { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
+ { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
+ { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_CP0_STATUS,
KVM_REG_MIPS_CP0_CAUSE,
KVM_REG_MIPS_CP0_EPC,
+ KVM_REG_MIPS_CP0_PRID,
KVM_REG_MIPS_CP0_CONFIG,
KVM_REG_MIPS_CP0_CONFIG1,
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG4,
+ KVM_REG_MIPS_CP0_CONFIG5,
KVM_REG_MIPS_CP0_CONFIG7,
KVM_REG_MIPS_CP0_ERROREPC,
@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
+ s64 vs[2];
+ unsigned int idx;
switch (reg->id) {
+ /* General purpose registers */
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
v = (long)vcpu->arch.pc;
break;
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ v = get_fpr32(&fpu->fpr[idx], 0);
+ else
+ v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ v = get_fpr64(&fpu->fpr[idx], 0);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.fpu_id;
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->fcr31;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Can't access MSA registers in FR=0 mode */
+ if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 0);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 1);
+#else
+ /* most significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 1);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 0);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.msa_id;
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->msacsr;
+ break;
+
+ /* Co-processor 0 registers */
case KVM_REG_MIPS_CP0_INDEX:
v = (long)kvm_read_c0_guest_index(cop0);
break;
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_EPC:
v = (long)kvm_read_c0_guest_epc(cop0);
break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- v = (long)kvm_read_c0_guest_errorepc(cop0);
+ case KVM_REG_MIPS_CP0_PRID:
+ v = (long)kvm_read_c0_guest_prid(cop0);
break;
case KVM_REG_MIPS_CP0_CONFIG:
v = (long)kvm_read_c0_guest_config(cop0);
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_CONFIG3:
v = (long)kvm_read_c0_guest_config3(cop0);
break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ v = (long)kvm_read_c0_guest_config4(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ v = (long)kvm_read_c0_guest_config5(cop0);
+ break;
case KVM_REG_MIPS_CP0_CONFIG7:
v = (long)kvm_read_c0_guest_config7(cop0);
break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ v = (long)kvm_read_c0_guest_errorepc(cop0);
+ break;
/* registers to be handled specially */
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_COUNT_CTL:
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
u32 v32 = (u32)v;
return put_user(v32, uaddr32);
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- u64 v;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ s64 v;
+ s64 vs[2];
+ unsigned int idx;
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
if (get_user(v32, uaddr32) != 0)
return -EFAULT;
v = (s64)v32;
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+		if (copy_from_user(vs, uaddr, 16))
+			return -EFAULT;
} else {
return -EINVAL;
}
switch (reg->id) {
+ /* General purpose registers */
case KVM_REG_MIPS_R0:
/* Silently ignore requests to set $0 */
break;
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
vcpu->arch.pc = v;
break;
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ set_fpr32(&fpu->fpr[idx], 0, v);
+ else
+ set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ set_fpr64(&fpu->fpr[idx], 0, v);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ fpu->fcr31 = v;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ set_fpr64(&fpu->fpr[idx], 0, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 1, vs[1]);
+#else
+ /* most significant byte first */
+ set_fpr64(&fpu->fpr[idx], 1, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 0, vs[1]);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ fpu->msacsr = v;
+ break;
+
+ /* Co-processor 0 registers */
case KVM_REG_MIPS_CP0_INDEX:
kvm_write_c0_guest_index(cop0, v);
break;
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_EPC:
kvm_write_c0_guest_epc(cop0, v);
break;
+ case KVM_REG_MIPS_CP0_PRID:
+ kvm_write_c0_guest_prid(cop0, v);
+ break;
case KVM_REG_MIPS_CP0_ERROREPC:
kvm_write_c0_guest_errorepc(cop0, v);
break;
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_CP0_COMPARE:
case KVM_REG_MIPS_CP0_CAUSE:
+ case KVM_REG_MIPS_CP0_CONFIG:
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ case KVM_REG_MIPS_CP0_CONFIG5:
case KVM_REG_MIPS_COUNT_CTL:
case KVM_REG_MIPS_COUNT_RESUME:
case KVM_REG_MIPS_COUNT_HZ:
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
+ return -EINVAL;
+ if (cap->flags)
+ return -EINVAL;
+ if (cap->args[0])
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_MIPS_FPU:
+ vcpu->arch.fpu_enabled = true;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ vcpu->arch.msa_enabled = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
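From userland, these capabilities are enabled per vCPU with KVM_ENABLE_CAP after probing KVM_CHECK_EXTENSION. A minimal sketch, not part of this patch, assuming kvm_fd (from /dev/kvm) and vcpu_fd are already open:

/* Sketch only: expose the host FPU to the guest on one vCPU. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_guest_fpu(int kvm_fd, int vcpu_fd)
{
	struct kvm_enable_cap cap;

	/* KVM_CAP_MIPS_FPU reports whether the host FPU can be exposed */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) <= 0)
		return -1;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_MIPS_FPU;	/* flags and args[0] must be zero */
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}

The same pattern applies to KVM_CAP_MIPS_MSA, which per the check_extension hunk below is only reported when the host has MSA without vector partitioning.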
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
+ case KVM_ENABLE_CAP: {
+ struct kvm_enable_cap cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -ENOIOCTLCMD;
}
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
switch (ext) {
case KVM_CAP_ONE_REG:
+ case KVM_CAP_ENABLE_CAP:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
+ case KVM_CAP_MIPS_FPU:
+ r = !!cpu_has_fpu;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ /*
+ * We don't support MSA vector partitioning yet:
+ * 1) It would require explicit support which can't be tested
+ * yet due to lack of support in current hardware.
+ * 2) It extends the state that would need to be saved/restored
+ * by e.g. QEMU for migration.
+ *
+ * When vector partitioning hardware becomes available, support
+ * could be added by requiring a flag when enabling
+ * KVM_CAP_MIPS_MSA capability to indicate that userland knows
+ * to save/restore the appropriate extra state.
+ */
+ r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
+ break;
default:
r = 0;
break;
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
+ case T_TRAP:
+ ++vcpu->stat.trap_inst_exits;
+ trace_kvm_exit(vcpu, TRAP_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_trap(vcpu);
+ break;
+
+ case T_MSAFPE:
+ ++vcpu->stat.msa_fpe_exits;
+ trace_kvm_exit(vcpu, MSA_FPE_EXITS);
+ ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+ break;
+
+ case T_FPE:
+ ++vcpu->stat.fpe_exits;
+ trace_kvm_exit(vcpu, FPE_EXITS);
+ ret = kvm_mips_callbacks->handle_fpe(vcpu);
+ break;
+
+ case T_MSADIS:
+ ++vcpu->stat.msa_disabled_exits;
+ trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+ break;
+
default:
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
@@ -1146,12 +1386,233 @@ skip_emul:
}
}
+ if (ret == RESUME_GUEST) {
+ /*
+ * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+ * is live), restore FCR31 / MSACSR.
+ *
+ * This should be before returning to the guest exception
+ * vector, as it may well cause an [MSA] FP exception if there
+ * are pending exception bits unmasked. (see
+ * kvm_mips_csr_die_notifier() for how that is handled).
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
+ read_c0_status() & ST0_CU1)
+ __kvm_restore_fcsr(&vcpu->arch);
+
+ if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+ read_c0_config5() & MIPS_CONF5_MSAEN)
+ __kvm_restore_msacsr(&vcpu->arch);
+ }
+
/* Disable HTW before returning to guest or host */
htw_stop();
return ret;
}
+/* Enable FPU for guest and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If MSA state is already live, it is undefined how it interacts with
+ * FR=0 FPU state, and we don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+ * play it safe and save it first.
+ *
+ * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+ * get called when guest CU1 is set, however we can't trust the guest
+ * not to clobber the status register directly via the commpage.
+ */
+ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ kvm_lose_fpu(vcpu);
+
+ /*
+ * Enable FPU for guest
+ * We set FR and FRE according to guest context
+ */
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ enable_fpu_hazard();
+
+ /* If guest FPU state not active, restore it now */
+ if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+ __kvm_restore_fpu(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ }
+
+ preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ /*
+ * Enable FPU if enabled in guest, since we're restoring FPU context
+ * anyway. We set FR and FRE according to guest context.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If FR=0 FPU state is already live, it is undefined how it
+ * interacts with MSA state, so play it safe and save it first.
+ */
+ if (!(sr & ST0_FR) &&
+ (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
+ KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+ kvm_lose_fpu(vcpu);
+
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (sr & ST0_CU1 && cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ }
+
+ /* Enable MSA for guest */
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
+ case KVM_MIPS_FPU_FPU:
+ /*
+ * Guest FPU state already loaded, only restore upper MSA state
+ */
+ __kvm_restore_msa_upper(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ break;
+ case 0:
+		/* Neither FPU nor MSA already active, restore full MSA state */
+ __kvm_restore_msa(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ if (kvm_mips_guest_has_fpu(&vcpu->arch))
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ break;
+ default:
+ break;
+ }
+
+ preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
+void kvm_drop_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ disable_msa();
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+ }
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ }
+ preempt_enable();
+}
+
+/* Save and disable FPU & MSA */
+void kvm_lose_fpu(struct kvm_vcpu *vcpu)
+{
+ /*
+ * FPU & MSA get disabled in root context (hardware) when it is disabled
+ * in guest context (software), but the register state in the hardware
+ * may still be in use. This is why we explicitly re-enable the hardware
+ * before saving.
+ */
+
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ __kvm_save_msa(&vcpu->arch);
+
+ /* Disable MSA & FPU */
+ disable_msa();
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
+ } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ set_c0_status(ST0_CU1);
+ enable_fpu_hazard();
+
+ __kvm_save_fpu(&vcpu->arch);
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+
+ /* Disable FPU */
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ }
+ preempt_enable();
+}
+
+/*
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
+ */
+static int kvm_mips_csr_die_notify(struct notifier_block *self,
+ unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long pc;
+
+ /* Only interested in FPE and MSAFPE */
+ if (cmd != DIE_FP && cmd != DIE_MSAFP)
+ return NOTIFY_DONE;
+
+ /* Return immediately if guest context isn't active */
+ if (!(current->flags & PF_VCPU))
+ return NOTIFY_DONE;
+
+ /* Should never get here from user mode */
+ BUG_ON(user_mode(regs));
+
+ pc = instruction_pointer(regs);
+ switch (cmd) {
+ case DIE_FP:
+ /* match 2nd instruction in __kvm_restore_fcsr */
+ if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
+ return NOTIFY_DONE;
+ break;
+ case DIE_MSAFP:
+ /* match 2nd/3rd instruction in __kvm_restore_msacsr */
+ if (!cpu_has_msa ||
+ pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+ pc > (unsigned long)&__kvm_restore_msacsr + 8)
+ return NOTIFY_DONE;
+ break;
+ }
+
+ /* Move PC forward a little and continue executing */
+ instruction_pointer(regs) += 4;
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kvm_mips_csr_die_notifier = {
+ .notifier_call = kvm_mips_csr_die_notify,
+};
+
int __init kvm_mips_init(void)
{
int ret;
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void)
if (ret)
return ret;
+ register_die_notifier(&kvm_mips_csr_die_notifier);
+
/*
* On MIPS, kernel modules are executed from "mapped space", which
* requires TLBs. The TLB handling code is statically linked with
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void)
kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
kvm_mips_is_error_pfn = is_error_pfn;
- pr_info("KVM/MIPS Initialized\n");
return 0;
}
@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void)
kvm_mips_release_pfn_clean = NULL;
kvm_mips_is_error_pfn = NULL;
- pr_info("KVM/MIPS unloaded\n");
+ unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
module_init(kvm_mips_init);
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 0000000..d02f0c6
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * MIPS SIMD Architecture (MSA) context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_msa)
+ st_d 0, VCPU_FPR0, a0
+ st_d 1, VCPU_FPR1, a0
+ st_d 2, VCPU_FPR2, a0
+ st_d 3, VCPU_FPR3, a0
+ st_d 4, VCPU_FPR4, a0
+ st_d 5, VCPU_FPR5, a0
+ st_d 6, VCPU_FPR6, a0
+ st_d 7, VCPU_FPR7, a0
+ st_d 8, VCPU_FPR8, a0
+ st_d 9, VCPU_FPR9, a0
+ st_d 10, VCPU_FPR10, a0
+ st_d 11, VCPU_FPR11, a0
+ st_d 12, VCPU_FPR12, a0
+ st_d 13, VCPU_FPR13, a0
+ st_d 14, VCPU_FPR14, a0
+ st_d 15, VCPU_FPR15, a0
+ st_d 16, VCPU_FPR16, a0
+ st_d 17, VCPU_FPR17, a0
+ st_d 18, VCPU_FPR18, a0
+ st_d 19, VCPU_FPR19, a0
+ st_d 20, VCPU_FPR20, a0
+ st_d 21, VCPU_FPR21, a0
+ st_d 22, VCPU_FPR22, a0
+ st_d 23, VCPU_FPR23, a0
+ st_d 24, VCPU_FPR24, a0
+ st_d 25, VCPU_FPR25, a0
+ st_d 26, VCPU_FPR26, a0
+ st_d 27, VCPU_FPR27, a0
+ st_d 28, VCPU_FPR28, a0
+ st_d 29, VCPU_FPR29, a0
+ st_d 30, VCPU_FPR30, a0
+ st_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_save_msa)
+
+LEAF(__kvm_restore_msa)
+ ld_d 0, VCPU_FPR0, a0
+ ld_d 1, VCPU_FPR1, a0
+ ld_d 2, VCPU_FPR2, a0
+ ld_d 3, VCPU_FPR3, a0
+ ld_d 4, VCPU_FPR4, a0
+ ld_d 5, VCPU_FPR5, a0
+ ld_d 6, VCPU_FPR6, a0
+ ld_d 7, VCPU_FPR7, a0
+ ld_d 8, VCPU_FPR8, a0
+ ld_d 9, VCPU_FPR9, a0
+ ld_d 10, VCPU_FPR10, a0
+ ld_d 11, VCPU_FPR11, a0
+ ld_d 12, VCPU_FPR12, a0
+ ld_d 13, VCPU_FPR13, a0
+ ld_d 14, VCPU_FPR14, a0
+ ld_d 15, VCPU_FPR15, a0
+ ld_d 16, VCPU_FPR16, a0
+ ld_d 17, VCPU_FPR17, a0
+ ld_d 18, VCPU_FPR18, a0
+ ld_d 19, VCPU_FPR19, a0
+ ld_d 20, VCPU_FPR20, a0
+ ld_d 21, VCPU_FPR21, a0
+ ld_d 22, VCPU_FPR22, a0
+ ld_d 23, VCPU_FPR23, a0
+ ld_d 24, VCPU_FPR24, a0
+ ld_d 25, VCPU_FPR25, a0
+ ld_d 26, VCPU_FPR26, a0
+ ld_d 27, VCPU_FPR27, a0
+ ld_d 28, VCPU_FPR28, a0
+ ld_d 29, VCPU_FPR29, a0
+ ld_d 30, VCPU_FPR30, a0
+ ld_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa)
+
+ .macro kvm_restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ lw $1, \off(\base)
+ insert_w \wr, 2
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(__kvm_restore_msa_upper)
+ kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0
+ kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0
+ kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0
+ kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0
+ kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0
+ kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0
+ kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0
+ kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0
+ kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0
+ kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0
+ kvm_restore_msa_upper 10, VCPU_FPR10+8, a0
+ kvm_restore_msa_upper 11, VCPU_FPR11+8, a0
+ kvm_restore_msa_upper 12, VCPU_FPR12+8, a0
+ kvm_restore_msa_upper 13, VCPU_FPR13+8, a0
+ kvm_restore_msa_upper 14, VCPU_FPR14+8, a0
+ kvm_restore_msa_upper 15, VCPU_FPR15+8, a0
+ kvm_restore_msa_upper 16, VCPU_FPR16+8, a0
+ kvm_restore_msa_upper 17, VCPU_FPR17+8, a0
+ kvm_restore_msa_upper 18, VCPU_FPR18+8, a0
+ kvm_restore_msa_upper 19, VCPU_FPR19+8, a0
+ kvm_restore_msa_upper 20, VCPU_FPR20+8, a0
+ kvm_restore_msa_upper 21, VCPU_FPR21+8, a0
+ kvm_restore_msa_upper 22, VCPU_FPR22+8, a0
+ kvm_restore_msa_upper 23, VCPU_FPR23+8, a0
+ kvm_restore_msa_upper 24, VCPU_FPR24+8, a0
+ kvm_restore_msa_upper 25, VCPU_FPR25+8, a0
+ kvm_restore_msa_upper 26, VCPU_FPR26+8, a0
+ kvm_restore_msa_upper 27, VCPU_FPR27+8, a0
+ kvm_restore_msa_upper 28, VCPU_FPR28+8, a0
+ kvm_restore_msa_upper 29, VCPU_FPR29+8, a0
+ kvm_restore_msa_upper 30, VCPU_FPR30+8, a0
+ kvm_restore_msa_upper 31, VCPU_FPR31+8, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa_upper)
+
+LEAF(__kvm_restore_msacsr)
+ lw t0, VCPU_MSA_CSR(a0)
+ /*
+ * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
+ * See kvm_mips_csr_die_notify() which handles t0 containing a value
+ * which triggers an MSA FP Exception, which must be stepped over and
+ * ignored since the set cause bits must remain there for the guest.
+ */
+ _ctcmsa MSA_CSR, t0
+ jr ra
+ nop
+ END(__kvm_restore_msacsr)
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index a74d602..888bb67 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
"System Call",
"Reserved Inst",
"Break Inst",
+ "Trap Inst",
+ "MSA FPE",
+ "FPE",
+ "MSA Disabled",
"D-Cache Flushes",
};
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index b6beb0e..aed0ac2 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
}
+ /* restore guest state to registers */
+ kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
local_irq_restore(flags);
}
@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu;
+ /* save guest state in registers */
+ kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
ASID_VERSION_MASK)) {
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index fd7257b..d836ed5 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
- er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
- else
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ /* FPU Unusable */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
+ /*
+ * Unusable/no FPU in guest:
+ * deliver guest COP1 Unusable Exception
+ */
+ er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ } else {
+ /* Restore FPU state */
+ kvm_own_fpu(vcpu);
+ er = EMULATE_DONE;
+ }
+ } else {
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ }
switch (er) {
case EMULATE_DONE:
@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled.
+ */
+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
+ /*
+		 * No MSA in guest, or FPU enabled but not in FR=1 mode:
+		 * deliver a guest reserved instruction exception
+ */
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
+ /* MSA disabled by guest, guest MSA disabled exception */
+ er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+ } else {
+ /* Restore MSA/FPU state */
+ kvm_own_msa(vcpu);
+ er = EMULATE_DONE;
+ }
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
* guest will come up as expected, for now we simulate a MIPS 24kc
*/
kvm_write_c0_guest_prid(cop0, 0x00019300);
- kvm_write_c0_guest_config(cop0,
- MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ /* Have config1, Cacheable, noncoherent, write-back, write allocate */
+ kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
+ (0x1 << CP0C0_AR) |
(MMU_TYPE_R4000 << CP0C0_MT));
/* Read the cache characteristics from the host Config1 Register */
@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
(1 << CP0C1_WR) | (1 << CP0C1_CA));
kvm_write_c0_guest_config1(cop0, config1);
- kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
- /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
- kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
- (1 << CP0C3_ULRI));
+ /* Have config3, no tertiary/secondary caches implemented */
+ kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
+ /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
+
+ /* Have config4, UserLocal */
+ kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
+
+ /* Have config5 */
+ kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
+
+ /* No config6 */
+ kvm_write_c0_guest_config5(cop0, 0);
/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int ret = 0;
+ unsigned int cur, change;
switch (reg->id) {
case KVM_REG_MIPS_CP0_COUNT:
@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_write_c0_guest_cause(cop0, v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
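+		/*
+		 * Only bits covered by kvm_mips_config1_wrmask() are
+		 * guest-writable; the rest of Config1 stays read-only.
+		 */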
+ cur = kvm_read_c0_guest_config1(cop0);
+ change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config1(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ cur = kvm_read_c0_guest_config3(cop0);
+ change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config3(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ cur = kvm_read_c0_guest_config4(cop0);
+ change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config4(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ cur = kvm_read_c0_guest_config5(cop0);
+ change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config5(cop0, v);
+ }
+ break;
case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v);
break;
@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
return ret;
}
+static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_lose_fpu(vcpu);
+
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
/* exit handlers */
.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_syscall = kvm_trap_emul_handle_syscall,
.handle_res_inst = kvm_trap_emul_handle_res_inst,
.handle_break = kvm_trap_emul_handle_break,
+ .handle_trap = kvm_trap_emul_handle_trap,
+ .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
+ .handle_fpe = kvm_trap_emul_handle_fpe,
+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
.vm_init = kvm_trap_emul_vm_init,
.vcpu_init = kvm_trap_emul_vcpu_init,
@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.irq_clear = kvm_mips_irq_clear_cb,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
+ .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
+ .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 39b3a8f..6249cdc 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -34,7 +34,7 @@
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#define MAX_CPU 32
#define MAX_SRC 256
@@ -289,11 +289,6 @@ static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
clear_bit(n_IRQ, q->queue);
}
-static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
-{
- return test_bit(n_IRQ, q->queue);
-}
-
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
int irq = -1;
@@ -1374,8 +1369,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
return -ENXIO;
}
-static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
- int len, void *ptr)
+static int kvm_mpic_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
@@ -1415,8 +1411,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
return ret;
}
-static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
- int len, const void *ptr)
+static int kvm_mpic_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, const void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27c0fac..24bfe40 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index f407bbf..d01fc58 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
- __u8 reserved1c0[30]; /* 0x01c0 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
__u8 data[256];
} __packed;
+struct kvm_s390_vregs {
+ __vector128 vrs[32];
+ __u8 reserved200[512]; /* for future vector expansion */
+} __packed;
+
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
- __u8 reserved700[2304]; /* 0x0700 */
+ __u8 reserved700[1280]; /* 0x0700 */
+ struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_stop;
u32 instruction_sigp_stop_store_status;
u32 instruction_sigp_store_status;
+ u32 instruction_sigp_store_adtl_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
+#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
@@ -334,6 +344,11 @@ enum irq_types {
IRQ_PEND_COUNT
};
+/* We have 2M for virtio device descriptor pages. Smallest amount of
+ * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
+ */
+#define KVM_S390_MAX_VIRTIO_IRQS 87381
+
/*
* Repressible (non-floating) machine check interrupts
* subclass bits in MCIC
@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt {
unsigned long pending_irqs;
};
+#define FIRQ_LIST_IO_ISC_0 0
+#define FIRQ_LIST_IO_ISC_1 1
+#define FIRQ_LIST_IO_ISC_2 2
+#define FIRQ_LIST_IO_ISC_3 3
+#define FIRQ_LIST_IO_ISC_4 4
+#define FIRQ_LIST_IO_ISC_5 5
+#define FIRQ_LIST_IO_ISC_6 6
+#define FIRQ_LIST_IO_ISC_7 7
+#define FIRQ_LIST_PFAULT 8
+#define FIRQ_LIST_VIRTIO 9
+#define FIRQ_LIST_COUNT 10
+#define FIRQ_CNTR_IO 0
+#define FIRQ_CNTR_SERVICE 1
+#define FIRQ_CNTR_VIRTIO 2
+#define FIRQ_CNTR_PFAULT 3
+#define FIRQ_MAX_COUNT 4
+
struct kvm_s390_float_interrupt {
+ unsigned long pending_irqs;
spinlock_t lock;
- struct list_head list;
- atomic_t active;
+ struct list_head lists[FIRQ_LIST_COUNT];
+ int counters[FIRQ_MAX_COUNT];
+ struct kvm_s390_mchk_info mchk;
+ struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
@@ -465,6 +499,7 @@ struct kvm_vcpu_arch {
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
+ struct kvm_s390_vregs *host_vregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
@@ -553,6 +588,7 @@ struct kvm_arch{
int use_cmma;
int user_cpu_state_ctrl;
int user_sigp;
+ int user_stsi;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9c77e60..ef1a5fc 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_CRS (1UL << 3)
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
__u64 pft; /* pfault token [PFAULT] */
__u64 pfs; /* pfault select [PFAULT] */
__u64 pfc; /* pfault compare [PFAULT] */
+ __u64 vrs[32][2]; /* vector registers */
+ __u8 reserved[512]; /* for future vector expansion */
+ __u32 fpc; /* only valid with vector registers */
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index d4096fd..ee69c08 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -230,7 +230,7 @@
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) ( \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
- INSN_DECODE(insn)
+ INSN_DECODE(insn))
#endif /* _UAPI_ASM_S390_SIE_H */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e916..8dc4db1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -171,6 +171,7 @@ int main(void)
#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+ DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254aff..fc7ec95 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+ rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
* - gpr 3 contains the virtqueue index (passed as datamatch)
* - gpr 4 contains the index on the bus (optionally)
*/
- ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+ ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu->run->s.regs.gprs[2] & 0xffffffff,
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+ int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 267523c..a7559f7 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"
+#include <asm/switch_to.h>
union asce {
unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
unsigned long pfra : 52; /* Page-Frame Real Address */
};
+union alet {
+ u32 val;
+ struct {
+ u32 reserved : 7;
+ u32 p : 1;
+ u32 alesn : 8;
+ u32 alen : 16;
+ };
+};
+
+union ald {
+ u32 val;
+ struct {
+ u32 : 1;
+ u32 alo : 24;
+ u32 all : 7;
+ };
+};
+
+struct ale {
+ unsigned long i : 1; /* ALEN-Invalid Bit */
+ unsigned long : 5;
+ unsigned long fo : 1; /* Fetch-Only Bit */
+ unsigned long p : 1; /* Private Bit */
+ unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
+ unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
+ unsigned long : 32;
+ unsigned long : 1;
+ unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
+ unsigned long : 6;
+ unsigned long astesn : 32; /* ASTE Sequence Number */
+} __packed;
+
+struct aste {
+ unsigned long i : 1; /* ASX-Invalid Bit */
+ unsigned long ato : 29; /* Authority-Table Origin */
+ unsigned long : 1;
+ unsigned long b : 1; /* Base-Space Bit */
+ unsigned long ax : 16; /* Authorization Index */
+ unsigned long atl : 12; /* Authority-Table Length */
+ unsigned long : 2;
+ unsigned long ca : 1; /* Controlled-ASN Bit */
+ unsigned long ra : 1; /* Reusable-ASN Bit */
+ unsigned long asce : 64; /* Address-Space-Control Element */
+ unsigned long ald : 32;
+ unsigned long astesn : 32;
+	/* ... more fields follow */
+} __packed;
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
ipte_unlock_simple(vcpu);
}
-static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+ int write)
+{
+ union alet alet;
+ struct ale ale;
+ struct aste aste;
+ unsigned long ald_addr, authority_table_addr;
+ union ald ald;
+ int eax, rc;
+ u8 authority_table;
+
+ if (ar >= NUM_ACRS)
+ return -EINVAL;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ alet.val = vcpu->run->s.regs.acrs[ar];
+
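+	/* ALET 0 designates the primary, ALET 1 the secondary address space */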
+ if (ar == 0 || alet.val == 0) {
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
+ } else if (alet.val == 1) {
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
+ }
+
+ if (alet.reserved)
+ return PGM_ALET_SPECIFICATION;
+
+ if (alet.p)
+ ald_addr = vcpu->arch.sie_block->gcr[5];
+ else
+ ald_addr = vcpu->arch.sie_block->gcr[2];
+ ald_addr &= 0x7fffffc0;
+
+ rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
+ if (rc)
+ return rc;
+
+ if (alet.alen / 8 > ald.all)
+ return PGM_ALEN_TRANSLATION;
+
+ if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
+ return PGM_ADDRESSING;
+
+ rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
+ sizeof(struct ale));
+ if (rc)
+ return rc;
+
+ if (ale.i == 1)
+ return PGM_ALEN_TRANSLATION;
+ if (ale.alesn != alet.alesn)
+ return PGM_ALE_SEQUENCE;
+
+ rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
+ if (rc)
+ return rc;
+
+ if (aste.i)
+ return PGM_ASTE_VALIDITY;
+ if (aste.astesn != ale.astesn)
+ return PGM_ASTE_SEQUENCE;
+
+ if (ale.p == 1) {
+ eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
+ if (ale.aleax != eax) {
+ if (eax / 16 > aste.atl)
+ return PGM_EXTENDED_AUTHORITY;
+
+ authority_table_addr = aste.ato * 4 + eax / 4;
+
+ rc = read_guest_real(vcpu, authority_table_addr,
+ &authority_table,
+ sizeof(u8));
+ if (rc)
+ return rc;
+
+ if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
+ return PGM_EXTENDED_AUTHORITY;
+ }
+ }
+
+ if (ale.fo == 1 && write)
+ return PGM_PROTECTION;
+
+ asce->val = aste.asce;
+ return 0;
+}
+
+struct trans_exc_code_bits {
+ unsigned long addr : 52; /* Translation-exception Address */
+ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
+ unsigned long : 6;
+ unsigned long b60 : 1;
+ unsigned long b61 : 1;
+ unsigned long as : 2; /* ASCE Identifier */
+};
+
+enum {
+	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+ FSI_STORE = 1, /* Exception was due to store operation */
+ FSI_FETCH = 2 /* Exception was due to fetch operation */
+};
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+ ar_t ar, int write)
{
+ int rc;
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+ struct trans_exc_code_bits *tec_bits;
+
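+	/* pre-fill exception data in case an access exception is injected */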
+ memset(pgm, 0, sizeof(*pgm));
+ tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+ tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+ tec_bits->as = psw_bits(*psw).as;
+
+ if (!psw_bits(*psw).t) {
+ asce->val = 0;
+ asce->r = 1;
+ return 0;
+ }
+
switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
case PSW_AS_PRIMARY:
- return vcpu->arch.sie_block->gcr[1];
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
case PSW_AS_SECONDARY:
- return vcpu->arch.sie_block->gcr[7];
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
case PSW_AS_HOME:
- return vcpu->arch.sie_block->gcr[13];
+ asce->val = vcpu->arch.sie_block->gcr[13];
+ return 0;
+ case PSW_AS_ACCREG:
+ rc = ar_translation(vcpu, asce, ar, write);
+ switch (rc) {
+ case PGM_ALEN_TRANSLATION:
+ case PGM_ALE_SEQUENCE:
+ case PGM_ASTE_VALIDITY:
+ case PGM_ASTE_SEQUENCE:
+ case PGM_EXTENDED_AUTHORITY:
+ vcpu->arch.pgm.exc_access_id = ar;
+ break;
+ case PGM_PROTECTION:
+ tec_bits->b60 = 1;
+ tec_bits->b61 = 1;
+ break;
+ }
+ if (rc > 0)
+ pgm->code = rc;
+ return rc;
}
return 0;
}
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* @vcpu: virtual cpu
* @gva: guest virtual address
* @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
* @write: indicates if access is a write access
*
* Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
* If the resulting absolute address is not available in the configuration
* an addressing exception is indicated and @gpa will not be changed.
*
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* by the architecture
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write)
+ unsigned long *gpa, const union asce asce,
+ int write)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
union ctlreg0 ctlreg0;
unsigned long ptr;
int edat1, edat2;
- union asce asce;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
- asce.val = get_vcpu_asce(vcpu);
if (asce.r)
goto real_address;
ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
return (ga & ~0x11fful) == 0;
}
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+ const union asce asce)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- union asce asce;
if (!ctlreg0.lap)
return 0;
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && asce.p)
return 0;
return 1;
}
-struct trans_exc_code_bits {
- unsigned long addr : 52; /* Translation-exception Address */
- unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
- unsigned long : 7;
- unsigned long b61 : 1;
- unsigned long as : 2; /* ASCE Identifier */
-};
-
-enum {
- FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
- FSI_STORE = 1, /* Exception was due to store operation */
- FSI_FETCH = 2 /* Exception was due to fetch operation */
-};
-
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
unsigned long *pages, unsigned long nr_pages,
- int write)
+ const union asce asce, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
int lap_enabled, rc;
- memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
- tec_bits->as = psw_bits(*psw).as;
- lap_enabled = low_address_protection_enabled(vcpu);
+ lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
ga = kvm_s390_logical_to_effective(vcpu, ga);
tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
}
ga &= PAGE_MASK;
if (psw_bits(*psw).t) {
- rc = guest_translate(vcpu, ga, pages, write);
+ rc = guest_translate(vcpu, ga, pages, asce, write);
if (rc < 0)
return rc;
if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
return 0;
}
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
if (!len)
return 0;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
+ if (rc)
+ return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(nr_pages * sizeof(unsigned long));
if (!pages)
return -ENOMEM;
- asce.val = get_vcpu_asce(vcpu);
need_ipte_lock = psw_bits(*psw).t && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
- rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+ rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long *gpa, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
union asce asce;
int rc;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
-
gva = kvm_s390_logical_to_effective(vcpu, gva);
- memset(pgm, 0, sizeof(*pgm));
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec->as = psw_bits(*psw).as;
- tec->fsi = write ? FSI_STORE : FSI_FETCH;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
tec->addr = gva >> PAGE_SHIFT;
- if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+ if (rc)
+ return rc;
+ if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (write) {
rc = pgm->code = PGM_PROTECTION;
return rc;
}
}
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
- rc = guest_translate(vcpu, gva, gpa, write);
+ rc = guest_translate(vcpu, gva, gpa, asce, write);
if (rc > 0) {
if (rc == PGM_PROTECTION)
tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
}
/**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * check_gva_range - test a range of guest virtual addresses for accessibility
+ */
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write)
+{
+ unsigned long gpa;
+ unsigned long currlen;
+ int rc = 0;
+
+ ipte_lock(vcpu);
+ while (length > 0 && !rc) {
+ currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
+ rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+ gva += currlen;
+ length -= currlen;
+ }
+ ipte_unlock(vcpu);
+
+ return rc;
+}
+
+/**
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and set
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
+ union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
- if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+ if (!ctlreg0.lap || !is_low_address(gra))
return 0;
memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = FSI_STORE;
tec_bits->as = psw_bits(*psw).as;
- tec_bits->addr = ga >> PAGE_SHIFT;
+ tec_bits->addr = gra >> PAGE_SHIFT;
pgm->code = PGM_PROTECTION;
return pgm->code;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf1..ef03726 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
}
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write);
+ ar_t ar, unsigned long *gpa, int write);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write);
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* write_guest - copy data from kernel space to guest space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: source address in kernel space
* @len: number of bytes to copy
*
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* If DAT is off data will be copied to guest real or absolute memory.
* If DAT is on data will be copied to the address space as specified by
* the address space bits of the PSW:
- * Primary, secondory or home space (access register mode is currently not
- * implemented).
+ * Primary, secondary, home space or access register mode.
* The addressing mode of the PSW is also inspected, so that address wrap
* around is taken into account for 24-, 31- and 64-bit addressing mode,
* if the to be copied data crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 1);
+ return access_guest(vcpu, ga, ar, data, len, 1);
}
/**
* read_guest - copy data from guest space to kernel space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: destination address in kernel space
* @len: number of bytes to copy
*
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 0);
+ return access_guest(vcpu, ga, ar, data, len, 0);
}
/**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
#endif /* __KVM_S390_GACCESS_H */
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 3e8d409..e97b345 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
if (!wp_info->old_data)
return -ENOMEM;
/* try to backup the original value */
- ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
- wp_info->len);
+ ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+ wp_info->len);
if (ret) {
kfree(wp_info->old_data);
wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
continue;
/* refetch the wp data and compare it to the old value */
- if (!read_guest(vcpu, wp_info->phys_addr, temp,
- wp_info->len)) {
+ if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+ wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
return wp_info;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index bebd215..9e3779e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
break;
@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the source is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
- &srcaddr, 0);
+ reg2, &srcaddr, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the destination is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
- &dstaddr, 1);
+ reg1, &dstaddr, 1);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 073b5f3..9de4726 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
/*
* handling kvm guest interrupts
*
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -17,9 +17,12 @@
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
+#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
+#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -32,11 +35,6 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
-static int is_ioint(u64 type)
-{
- return ((type & 0xfffe0000u) != 0xfffe0000u);
-}
-
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -72,70 +70,45 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
return 1;
}
-static u64 int_word_to_isc_bits(u32 int_word)
+static int ckc_irq_pending(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.sie_block->ckc <
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+ return 0;
+ return ckc_interrupts_enabled(vcpu);
+}
+
+static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
+{
+ return !psw_extint_disabled(vcpu) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+}
+
+static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.sie_block->cputm >> 63) &&
+ cpu_timer_interrupts_enabled(vcpu);
+}
+
+static inline int is_ioirq(unsigned long irq_type)
{
- u8 isc = (int_word & 0x38000000) >> 27;
+ return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
+ (irq_type <= IRQ_PEND_IO_ISC_7));
+}
+static uint64_t isc_to_isc_bits(int isc)
+{
return (0x80 >> isc) << 24;
}
-static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static inline u8 int_word_to_isc(u32 int_word)
{
- switch (inti->type) {
- case KVM_S390_INT_EXTERNAL_CALL:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
- return 1;
- return 0;
- case KVM_S390_INT_EMERGENCY:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
- return 1;
- return 0;
- case KVM_S390_INT_CLOCK_COMP:
- return ckc_interrupts_enabled(vcpu);
- case KVM_S390_INT_CPU_TIMER:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
- return 1;
- return 0;
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_INIT:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
- return 1;
- return 0;
- case KVM_S390_PROGRAM_INT:
- case KVM_S390_SIGP_STOP:
- case KVM_S390_SIGP_SET_PREFIX:
- case KVM_S390_RESTART:
- return 1;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
- return 1;
- return 0;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[6] &
- int_word_to_isc_bits(inti->io.io_int_word))
- return 1;
- return 0;
- default:
- printk(KERN_WARNING "illegal interrupt type %llx\n",
- inti->type);
- BUG();
- }
- return 0;
+ return (int_word & 0x38000000) >> 27;
+}
+
+static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.float_int.pending_irqs;
}
static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
@@ -143,12 +116,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
return vcpu->arch.local_int.pending_irqs;
}
-static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
+ unsigned long active_mask)
+{
+ int i;
+
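+	/* clear pending I/O interrupts for ISCs that are masked off in CR6 */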
+ for (i = 0; i <= MAX_ISC; i++)
+ if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
+ active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
+
+ return active_mask;
+}
+
+static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
- unsigned long active_mask = pending_local_irqs(vcpu);
+ unsigned long active_mask;
+
+ active_mask = pending_local_irqs(vcpu);
+ active_mask |= pending_floating_irqs(vcpu);
if (psw_extint_disabled(vcpu))
active_mask &= ~IRQ_PEND_EXT_MASK;
+ if (psw_ioint_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_IO_MASK;
+ else
+ active_mask = disable_iscs(vcpu, active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
@@ -157,8 +149,13 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+ __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ if (!(vcpu->arch.sie_block->gcr[14] &
+ vcpu->kvm->arch.float_int.mchk.cr14))
+ __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
/*
* STOP irqs will never be actively delivered. They are triggered via
@@ -200,6 +197,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
+static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+ return;
+ else if (psw_ioint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR6;
+}
+
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
@@ -226,47 +233,17 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}
-/* Set interception request for non-deliverable local interrupts */
-static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+/* Set interception request for non-deliverable interrupts */
+static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
+ set_intercept_indicators_io(vcpu);
set_intercept_indicators_ext(vcpu);
set_intercept_indicators_mchk(vcpu);
set_intercept_indicators_stop(vcpu);
}
-static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR0;
- break;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- vcpu->arch.sie_block->ictl |= ICTL_LPSW;
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR14;
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_IO_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR6;
- break;
- default:
- BUG();
- }
-}
-
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
- const unsigned short table[] = { 2, 4, 4, 6 };
-
switch (vcpu->arch.sie_block->icptcode) {
case ICPT_INST:
case ICPT_INSTPROGI:
@@ -274,7 +251,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
case ICPT_PARTEXEC:
case ICPT_IOINST:
/* last instruction only stored for these icptcodes */
- return table[vcpu->arch.sie_block->ipa >> 14];
+ return insn_length(vcpu->arch.sie_block->ipa >> 8);
case ICPT_PROGI:
return vcpu->arch.sie_block->pgmilc;
default:
@@ -350,38 +327,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_mchk_info mchk;
- int rc;
+ struct kvm_s390_mchk_info mchk = {};
+ unsigned long adtl_status_addr;
+ int deliver = 0;
+ int rc = 0;
+ spin_lock(&fi->lock);
spin_lock(&li->lock);
- mchk = li->irq.mchk;
+ if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
+ test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
+ /*
+ * If there was an exigent machine check pending, then any
+ * repressible machine checks that might have been pending
+ * are indicated along with it, so always clear bits for
+ * repressible and exigent interrupts
+ */
+ mchk = li->irq.mchk;
+ clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ memset(&li->irq.mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
/*
- * If there was an exigent machine check pending, then any repressible
- * machine checks that might have been pending are indicated along
- * with it, so always clear both bits
+ * We indicate floating repressible conditions along with
+ * other pending conditions. Channel Report Pending and Channel
+	 * Subsystem damage are the only two and are indicated by
+ * bits in mcic and masked in cr14.
*/
- clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
- clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
- memset(&li->irq.mchk, 0, sizeof(mchk));
+ if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ mchk.mcic |= fi->mchk.mcic;
+ mchk.cr14 |= fi->mchk.cr14;
+ memset(&fi->mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
spin_unlock(&li->lock);
+ spin_unlock(&fi->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk.mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk.cr14, mchk.mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk.mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk.fixed_logout, sizeof(mchk.fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (deliver) {
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_MCHK,
+ mchk.cr14, mchk.mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+ &adtl_status_addr,
+ sizeof(unsigned long));
+ rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
+ adtl_status_addr);
+ rc |= put_guest_lc(vcpu, mchk.mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk.fixed_logout,
+ sizeof(mchk.fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ }
return rc ? -EFAULT : 0;
}
@@ -484,7 +495,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_pgm_info pgm_info;
- int rc = 0;
+ int rc = 0, nullifying = false;
u16 ilc = get_ilc(vcpu);
spin_lock(&li->lock);
@@ -509,6 +520,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_LX_TRANSLATION:
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
+ nullifying = true;
+ /* fall through */
case PGM_SPACE_SWITCH:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
@@ -521,6 +534,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_EXTENDED_AUTHORITY:
rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
+ nullifying = true;
break;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
@@ -534,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *)__LC_EXC_ACCESS_ID);
rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
+ nullifying = true;
break;
case PGM_MONITOR:
rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
@@ -541,6 +556,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
@@ -551,6 +567,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
+ case PGM_STACK_FULL:
+ case PGM_STACK_EMPTY:
+ case PGM_STACK_SPECIFICATION:
+ case PGM_STACK_TYPE:
+ case PGM_STACK_OPERATION:
+ case PGM_TRACE_TABEL:
+ case PGM_CRYPTO_OPERATION:
+ nullifying = true;
+ break;
}
if (pgm_info.code & PGM_PER) {
@@ -564,7 +589,12 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *) __LC_PER_ACCESS_ID);
}
+ if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
+ kvm_s390_rewind_psw(vcpu, ilc);
+
rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
+ (u64 *) __LC_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
@@ -574,16 +604,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_ext_info ext;
+ int rc = 0;
+
+ spin_lock(&fi->lock);
+ if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
+ spin_unlock(&fi->lock);
+ return 0;
+ }
+ ext = fi->srv_signal;
+ memset(&fi->srv_signal, 0, sizeof(ext));
+ clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
- inti->ext.ext_params);
+ ext.ext_params);
vcpu->stat.deliver_service_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
+ ext.ext_params, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
@@ -591,106 +632,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ rc |= put_guest_lc(vcpu, ext.ext_params,
(u32 *)__LC_EXT_PARAMS);
+
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
- KVM_S390_INT_PFAULT_DONE, 0,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_PFAULT] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
+ clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_DONE,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
- inti->ext.ext_params, inti->ext.ext_params2);
- vcpu->stat.deliver_virtio_interrupt++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4,
+ "interrupt: virtio parm:%x,parm64:%llx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ inti->ext.ext_params,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
+ clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+ unsigned long irq_type)
{
- int rc;
+ struct list_head *isc_list;
+ struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
- vcpu->stat.deliver_io_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- ((__u32)inti->io.subchannel_id << 16) |
- inti->io.subchannel_nr,
- ((__u64)inti->io.io_int_parm << 32) |
- inti->io.io_int_word);
-
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- return rc ? -EFAULT : 0;
-}
+ fi = &vcpu->kvm->arch.float_int;
-static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- struct kvm_s390_mchk_info *mchk = &inti->mchk;
- int rc;
+ spin_lock(&fi->lock);
+ isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
+ inti = list_first_entry_or_null(isc_list,
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr,
+ ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ }
+ if (list_empty(isc_list))
+ clear_bit(irq_type, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+
+ if (inti) {
+ rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+ (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+ (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+ (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+ (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ kfree(inti);
+ }
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk->mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk->cr14, mchk->mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk->mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk->fixed_logout, sizeof(mchk->fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
return rc ? -EFAULT : 0;
}
@@ -698,6 +779,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
+ [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
[IRQ_PEND_PROG] = __deliver_prog,
[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
@@ -706,36 +788,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_RESTART] = __deliver_restart,
[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
+ [IRQ_PEND_EXT_SERVICE] = __deliver_service,
+ [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
+ [IRQ_PEND_VIRTIO] = __deliver_virtio,
};
-static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- int rc;
-
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- rc = __deliver_service(vcpu, inti);
- break;
- case KVM_S390_INT_PFAULT_DONE:
- rc = __deliver_pfault_done(vcpu, inti);
- break;
- case KVM_S390_INT_VIRTIO:
- rc = __deliver_virtio(vcpu, inti);
- break;
- case KVM_S390_MCHK:
- rc = __deliver_mchk_floating(vcpu, inti);
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- rc = __deliver_io(vcpu, inti);
- break;
- default:
- BUG();
- }
-
- return rc;
-}
-
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
@@ -751,21 +808,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *inti;
int rc;
- rc = !!deliverable_local_irqs(vcpu);
-
- if ((!rc) && atomic_read(&fi->active)) {
- spin_lock(&fi->lock);
- list_for_each_entry(inti, &fi->list, list)
- if (__interrupt_is_deliverable(vcpu, inti)) {
- rc = 1;
- break;
- }
- spin_unlock(&fi->lock);
- }
+ rc = !!deliverable_irqs(vcpu);
if (!rc && kvm_cpu_has_pending_timer(vcpu))
rc = 1;
@@ -784,12 +829,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.sie_block->ckc <
- get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
- return 0;
- if (!ckc_interrupts_enabled(vcpu))
- return 0;
- return 1;
+ return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
@@ -884,60 +924,45 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
deliver_irq_t func;
- int deliver;
int rc = 0;
unsigned long irq_type;
- unsigned long deliverable_irqs;
+ unsigned long irqs;
__reset_intercept_indicators(vcpu);
/* pending ckc conditions might have been invalidated */
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
- if (kvm_cpu_has_pending_timer(vcpu))
+ if (ckc_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ /* pending cpu timer conditions might have been invalidated */
+ clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ if (cpu_timer_irq_pending(vcpu))
+ set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+
do {
- deliverable_irqs = deliverable_local_irqs(vcpu);
+ irqs = deliverable_irqs(vcpu);
/* bits are in the order of interrupt priority */
- irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+ irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
if (irq_type == IRQ_PEND_COUNT)
break;
- func = deliver_irq_funcs[irq_type];
- if (!func) {
- WARN_ON_ONCE(func == NULL);
- clear_bit(irq_type, &li->pending_irqs);
- continue;
+ if (is_ioirq(irq_type)) {
+ rc = __deliver_io(vcpu, irq_type);
+ } else {
+ func = deliver_irq_funcs[irq_type];
+ if (!func) {
+ WARN_ON_ONCE(func == NULL);
+ clear_bit(irq_type, &li->pending_irqs);
+ continue;
+ }
+ rc = func(vcpu);
}
- rc = func(vcpu);
- } while (!rc && irq_type != IRQ_PEND_COUNT);
+ if (rc)
+ break;
+ } while (!rc);
- set_intercept_indicators_local(vcpu);
-
- if (!rc && atomic_read(&fi->active)) {
- do {
- deliver = 0;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
- if (__interrupt_is_deliverable(vcpu, inti)) {
- list_del(&inti->list);
- fi->irq_count--;
- deliver = 1;
- break;
- }
- __set_intercept_indicator(vcpu, inti);
- }
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
- if (deliver) {
- rc = __deliver_floating_interrupt(vcpu, inti);
- kfree(inti);
- }
- } while (!rc && deliver);
- }
+ set_intercept_indicators(vcpu);
return rc;
}
@@ -1172,80 +1197,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
return 0;
}
+static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
+ int isc, u32 schid)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ struct kvm_s390_interrupt_info *iter;
+ u16 id = (schid & 0xffff0000U) >> 16;
+ u16 nr = schid & 0x0000ffffU;
+ spin_lock(&fi->lock);
+ list_for_each_entry(iter, isc_list, list) {
+ if (schid && (id != iter->io.subchannel_id ||
+ nr != iter->io.subchannel_nr))
+ continue;
+ /* found an appropriate entry */
+ list_del_init(&iter->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ if (list_empty(isc_list))
+ clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return iter;
+ }
+ spin_unlock(&fi->lock);
+ return NULL;
+}
+
+/*
+ * Dequeue and return an I/O interrupt matching any of the interruption
+ * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
+ */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid)
+ u64 isc_mask, u32 schid)
+{
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int isc;
+
+ for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
+ if (isc_mask & isc_to_isc_bits(isc))
+ inti = get_io_int(kvm, isc, schid);
+ }
+ return inti;
+}
+
+#define SCCB_MASK 0xFFFFFFF8
+#define SCCB_EVENT_PENDING 0x3
+
+static int __inject_service(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
+ /*
+ * Early versions of the QEMU s390 bios will inject several
+ * service interrupts after another without handling a
+ * condition code indicating busy.
+ * We will silently ignore those superfluous sccb values.
+ * A future version of QEMU will take care of serialization
+ * of servc requests
+ */
+ if (fi->srv_signal.ext_params & SCCB_MASK)
+ goto out;
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
+ set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+out:
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_virtio(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_VIRTIO] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
+ set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+static int __inject_pfault_done(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_PFAULT] >=
+ (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_PFAULT] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
+ set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+#define CR_PENDING_SUBCLASS 28
+static int __inject_float_mchk(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
+ fi->mchk.mcic |= inti->mchk.mcic;
+ set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *inti, *iter;
+ struct list_head *list;
+ int isc;
- if ((!schid && !cr6) || (schid && cr6))
- return NULL;
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
- inti = NULL;
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (cr6 &&
- ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
- continue;
- if (schid) {
- if (((schid & 0x00000000ffff0000) >> 16) !=
- iter->io.subchannel_id)
- continue;
- if ((schid & 0x000000000000ffff) !=
- iter->io.subchannel_nr)
- continue;
- }
- inti = iter;
- break;
- }
- if (inti) {
- list_del_init(&inti->list);
- fi->irq_count--;
+ if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
}
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
+ fi->counters[FIRQ_CNTR_IO] += 1;
+
+ isc = int_word_to_isc(inti->io.io_int_word);
+ list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ list_add_tail(&inti->list, list);
+ set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
spin_unlock(&fi->lock);
- return inti;
+ return 0;
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *iter;
struct kvm_vcpu *dst_vcpu = NULL;
int sigcpu;
- int rc = 0;
+ u64 type = READ_ONCE(inti->type);
+ int rc;
fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+
+ switch (type) {
+ case KVM_S390_MCHK:
+ rc = __inject_float_mchk(kvm, inti);
+ break;
+ case KVM_S390_INT_VIRTIO:
+ rc = __inject_virtio(kvm, inti);
+ break;
+ case KVM_S390_INT_SERVICE:
+ rc = __inject_service(kvm, inti);
+ break;
+ case KVM_S390_INT_PFAULT_DONE:
+ rc = __inject_pfault_done(kvm, inti);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ rc = __inject_io(kvm, inti);
+ break;
+ default:
rc = -EINVAL;
- goto unlock_fi;
}
- fi->irq_count++;
- if (!is_ioint(inti->type)) {
- list_add_tail(&inti->list, &fi->list);
- } else {
- u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+ if (rc)
+ return rc;
- /* Keep I/O interrupts sorted in isc order. */
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (int_word_to_isc_bits(iter->io.io_int_word)
- <= isc_bits)
- continue;
- break;
- }
- list_add_tail(&inti->list, &iter->list);
- }
- atomic_set(&fi->active, 1);
- if (atomic_read(&kvm->online_vcpus) == 0)
- goto unlock_fi;
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
do {
@@ -1257,7 +1384,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
- switch (inti->type) {
+ switch (type) {
case KVM_S390_MCHK:
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
break;
@@ -1270,9 +1397,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
}
spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
-unlock_fi:
- spin_unlock(&fi->lock);
- return rc;
+ return 0;
+
}
int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1332,10 +1458,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
return rc;
}
-void kvm_s390_reinject_io_int(struct kvm *kvm,
+int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
- __inject_vm(kvm, inti);
+ return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
@@ -1388,12 +1514,10 @@ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
spin_unlock(&li->lock);
}
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
- spin_lock(&li->lock);
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
@@ -1433,83 +1557,130 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
default:
rc = -EINVAL;
}
+
+ return rc;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ spin_lock(&li->lock);
+ rc = do_inject_vcpu(vcpu, irq);
spin_unlock(&li->lock);
if (!rc)
kvm_s390_vcpu_wakeup(vcpu);
return rc;
}
-void kvm_s390_clear_float_irqs(struct kvm *kvm)
+static inline void clear_irq_list(struct list_head *_list)
{
- struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
+ struct kvm_s390_interrupt_info *inti, *n;
- fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
+ list_for_each_entry_safe(inti, n, _list, list) {
list_del(&inti->list);
kfree(inti);
}
- fi->irq_count = 0;
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
}
-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
- u8 *addr)
+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
- struct kvm_s390_irq irq = {0};
-
- irq.type = inti->type;
+ irq->type = inti->type;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_SERVICE:
- irq.u.ext = inti->ext;
+ irq->u.ext = inti->ext;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- irq.u.io = inti->io;
+ irq->u.io = inti->io;
break;
- case KVM_S390_MCHK:
- irq.u.mchk = inti->mchk;
- break;
- default:
- return -EINVAL;
}
+}
- if (copy_to_user(uptr, &irq, sizeof(irq)))
- return -EFAULT;
+void kvm_s390_clear_float_irqs(struct kvm *kvm)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ int i;
- return 0;
-}
+ spin_lock(&fi->lock);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ clear_irq_list(&fi->lists[i]);
+ for (i = 0; i < FIRQ_MAX_COUNT; i++)
+ fi->counters[i] = 0;
+ spin_unlock(&fi->lock);
+};
-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_irq *buf;
+ struct kvm_s390_irq *irq;
+ int max_irqs;
int ret = 0;
int n = 0;
+ int i;
+
+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
+ return -EINVAL;
+
+ /*
+ * We are already using -ENOMEM to signal
+ * userspace it may retry with a bigger buffer,
+ * so we need to use something else for this case
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return -ENOBUFS;
+
+ max_irqs = len / sizeof(struct kvm_s390_irq);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
-
- list_for_each_entry(inti, &fi->list, list) {
- if (len < sizeof(struct kvm_s390_irq)) {
+ for (i = 0; i < FIRQ_LIST_COUNT; i++) {
+ list_for_each_entry(inti, &fi->lists[i], list) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ inti_to_irq(inti, &buf[n]);
+ n++;
+ }
+ }
+ if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
+ if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
- break;
+ goto out;
}
- ret = copy_irq_to_user(inti, buf);
- if (ret)
- break;
- buf += sizeof(struct kvm_s390_irq);
- len -= sizeof(struct kvm_s390_irq);
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_INT_SERVICE;
+ irq->u.ext = fi->srv_signal;
n++;
}
+ if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = fi->mchk;
+ n++;
+	}
+out:
spin_unlock(&fi->lock);
+ if (!ret && n > 0) {
+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
+ ret = -EFAULT;
+ }
+ vfree(buf);
return ret < 0 ? ret : n;
}
@@ -1520,7 +1691,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
default:
@@ -1952,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
{
return -EINVAL;
}
+
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_irq *buf;
+ int r = 0;
+ int n;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user((void *) buf, irqstate, len)) {
+ r = -EFAULT;
+ goto out_free;
+ }
+
+ /*
+ * Don't allow setting the interrupt state
+ * when there are already interrupts pending
+ */
+ spin_lock(&li->lock);
+ if (li->pending_irqs) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
+ for (n = 0; n < len / sizeof(*buf); n++) {
+ r = do_inject_vcpu(vcpu, &buf[n]);
+ if (r)
+ break;
+ }
+
+out_unlock:
+ spin_unlock(&li->lock);
+out_free:
+ vfree(buf);
+
+ return r;
+}
+
+static void store_local_irq(struct kvm_s390_local_interrupt *li,
+ struct kvm_s390_irq *irq,
+ unsigned long irq_type)
+{
+ switch (irq_type) {
+ case IRQ_PEND_MCHK_EX:
+ case IRQ_PEND_MCHK_REP:
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = li->irq.mchk;
+ break;
+ case IRQ_PEND_PROG:
+ irq->type = KVM_S390_PROGRAM_INT;
+ irq->u.pgm = li->irq.pgm;
+ break;
+ case IRQ_PEND_PFAULT_INIT:
+ irq->type = KVM_S390_INT_PFAULT_INIT;
+ irq->u.ext = li->irq.ext;
+ break;
+ case IRQ_PEND_EXT_EXTERNAL:
+ irq->type = KVM_S390_INT_EXTERNAL_CALL;
+ irq->u.extcall = li->irq.extcall;
+ break;
+ case IRQ_PEND_EXT_CLOCK_COMP:
+ irq->type = KVM_S390_INT_CLOCK_COMP;
+ break;
+ case IRQ_PEND_EXT_CPU_TIMER:
+ irq->type = KVM_S390_INT_CPU_TIMER;
+ break;
+ case IRQ_PEND_SIGP_STOP:
+ irq->type = KVM_S390_SIGP_STOP;
+ irq->u.stop = li->irq.stop;
+ break;
+ case IRQ_PEND_RESTART:
+ irq->type = KVM_S390_RESTART;
+ break;
+ case IRQ_PEND_SET_PREFIX:
+ irq->type = KVM_S390_SIGP_SET_PREFIX;
+ irq->u.prefix = li->irq.prefix;
+ break;
+ }
+}
+
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
+{
+ uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+ unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ unsigned long pending_irqs;
+ struct kvm_s390_irq irq;
+ unsigned long irq_type;
+ int cpuaddr;
+ int n = 0;
+
+ spin_lock(&li->lock);
+ pending_irqs = li->pending_irqs;
+ memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
+ sizeof(sigp_emerg_pending));
+ spin_unlock(&li->lock);
+
+ for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
+ memset(&irq, 0, sizeof(irq));
+ if (irq_type == IRQ_PEND_EXT_EMERGENCY)
+ continue;
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
+ for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
+ memset(&irq, 0, sizeof(irq));
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ irq.type = KVM_S390_INT_EMERGENCY;
+ irq.u.emerg.code = cpuaddr;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+ }
+
+ if ((sigp_ctrl & SIGP_CTRL_C) &&
+ (atomic_read(&vcpu->arch.sie_block->cpuflags) &
+ CPUSTAT_ECALL_PEND)) {
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ memset(&irq, 0, sizeof(irq));
+ irq.type = KVM_S390_INT_EXTERNAL_CALL;
+ irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ return n;
+}
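kvm_s390_get_irq_state() and kvm_s390_set_irq_state() are wired up to the new KVM_S390_GET_IRQ_STATE / KVM_S390_SET_IRQ_STATE vcpu ioctls in kvm-s390.c below. A hypothetical migration-side sketch of the save/restore pairing (only the buf/len fields used by the handlers are relied on; MAX_LOCAL_IRQS is an illustrative bound):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MAX_LOCAL_IRQS	64	/* illustrative upper bound */

/* Source side: fetch the vcpu's pending local interrupts.
 * A non-negative ioctl return is the number of bytes written. */
static int save_vcpu_irqs(int vcpu_fd, struct kvm_s390_irq *irqs, int *nr)
{
	struct kvm_s390_irq_state state = {
		.buf = (uint64_t)(unsigned long)irqs,
		.len = MAX_LOCAL_IRQS * sizeof(struct kvm_s390_irq),
	};
	int r = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);

	if (r < 0)
		return -errno;	/* e.g. ENOBUFS: buffer too small */
	*nr = r / sizeof(struct kvm_s390_irq);
	return 0;
}

/* Target side: re-inject before the vcpu runs; the kernel refuses with
 * EBUSY if interrupts are already pending (see the check above). */
static int restore_vcpu_irqs(int vcpu_fd, struct kvm_s390_irq *irqs, int nr)
{
	struct kvm_s390_irq_state state = {
		.buf = (uint64_t)(unsigned long)irqs,
		.len = nr * sizeof(struct kvm_s390_irq),
	};
	return ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &state);
}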
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 19e17bd..afa2bd7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -25,11 +25,13 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
+#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -38,6 +40,11 @@
#include "trace.h"
#include "trace-s390.h"
+#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
+#define LOCAL_IRQS 32
+#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
+ (KVM_MAX_VCPUS + LOCAL_IRQS))
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -87,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
+ { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -101,8 +109,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
- 0xff82fffbf4fc2000UL,
- 0x005c000000000000UL,
+ 0xffe6fffbfcfdfc40UL,
+ 0x205c800000000000UL,
};
unsigned long kvm_s390_fac_list_mask_size(void)
@@ -171,9 +179,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_S390_INJECT_IRQ:
case KVM_CAP_S390_USER_SIGP:
+ case KVM_CAP_S390_USER_STSI:
+ case KVM_CAP_S390_SKEYS:
+ case KVM_CAP_S390_IRQ_STATE:
r = 1;
break;
+ case KVM_CAP_S390_MEM_OP:
+ r = MEM_OP_MAX_SIZE;
+ break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -184,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ r = MACHINE_HAS_VX;
+ break;
default:
r = 0;
}
@@ -264,6 +282,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.user_sigp = 1;
r = 0;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ if (MACHINE_HAS_VX) {
+ set_kvm_facility(kvm->arch.model.fac->mask, 129);
+ set_kvm_facility(kvm->arch.model.fac->list, 129);
+ r = 0;
+ } else
+ r = -EINVAL;
+ break;
+ case KVM_CAP_S390_USER_STSI:
+ kvm->arch.user_stsi = 1;
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -708,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}
+static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ unsigned long curkey;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Is this guest using storage keys? */
+ if (!mm_use_skey(current->mm))
+ return KVM_S390_GET_SKEYS_NONE;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ curkey = get_guest_storage_key(current->mm, hva);
+ if (IS_ERR_VALUE(curkey)) {
+ r = curkey;
+ goto out;
+ }
+ keys[i] = curkey;
+ }
+
+ r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
+ sizeof(uint8_t) * args->count);
+ if (r)
+ r = -EFAULT;
+out:
+ kvfree(keys);
+ return r;
+}
+
+static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
+ sizeof(uint8_t) * args->count);
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Enable storage key handling for the guest */
+ s390_enable_skey();
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Lowest order bit is reserved */
+ if (keys[i] & 0x01) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = set_guest_storage_key(current->mm, hva,
+ (unsigned long)keys[i], 0);
+ if (r)
+ goto out;
+ }
+out:
+ kvfree(keys);
+ return r;
+}
+
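The two handlers above expose one storage key per 4 KiB guest frame through the KVM_S390_GET_SKEYS / KVM_S390_SET_SKEYS VM ioctls added further down. A hypothetical caller for the read side, noting the KVM_S390_GET_SKEYS_NONE return when the guest never enabled keys:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch 'count' storage keys starting at guest frame 'start_gfn'.
 * 'keys' must hold at least 'count' bytes; the kernel caps count at
 * KVM_S390_SKEYS_MAX. */
static int get_guest_skeys(int vm_fd, uint64_t start_gfn, uint64_t count,
			   uint8_t *keys)
{
	struct kvm_s390_skeys args = {
		.start_gfn = start_gfn,
		.count = count,
		.skeydata_addr = (uint64_t)(unsigned long)keys,
	};

	/* returns KVM_S390_GET_SKEYS_NONE if the guest does not use keys */
	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}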
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -767,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_vm_has_attr(kvm, &attr);
break;
}
+ case KVM_S390_GET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_get_skeys(kvm, &args);
+ break;
+ }
+ case KVM_S390_SET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_set_skeys(kvm, &args);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -887,7 +1039,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
if (!kvm->arch.dbf)
- goto out_nodbf;
+ goto out_err;
/*
* The architectural maximum amount of facilities is 16 kbit. To store
@@ -899,7 +1051,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac =
(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
- goto out_nofac;
+ goto out_err;
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
@@ -919,10 +1071,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
if (kvm_s390_crypto_init(kvm) < 0)
- goto out_crypto;
+ goto out_err;
spin_lock_init(&kvm->arch.float_int.lock);
- INIT_LIST_HEAD(&kvm->arch.float_int.list);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
init_waitqueue_head(&kvm->arch.ipte_wq);
mutex_init(&kvm->arch.ipte_mutex);
@@ -934,7 +1087,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else {
kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
if (!kvm->arch.gmap)
- goto out_nogmap;
+ goto out_err;
kvm->arch.gmap->private = kvm;
kvm->arch.gmap->pfault_enabled = 0;
}
@@ -946,15 +1099,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.start_stop_lock);
return 0;
-out_nogmap:
+out_err:
kfree(kvm->arch.crypto.crycb);
-out_crypto:
free_page((unsigned long)kvm->arch.model.fac);
-out_nofac:
debug_unregister(kvm->arch.dbf);
-out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
-out_err:
return rc;
}
@@ -1034,6 +1183,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+ if (test_kvm_facility(vcpu->kvm, 129))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
if (kvm_is_ucontrol(vcpu->kvm))
return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1044,10 +1195,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- save_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
- restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ restore_fp_ctl(&vcpu->run->s.regs.fpc);
+ restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -1057,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ save_fp_ctl(&vcpu->run->s.regs.fpc);
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
save_access_regs(vcpu->run->s.regs.acrs);
restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}
@@ -1129,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
return 0;
}
+static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
+
+ vcpu->arch.cpu_id = model->cpu_id;
+ vcpu->arch.sie_block->ibc = model->ibc;
+ vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+}
+
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
@@ -1137,6 +1313,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_SM |
CPUSTAT_STOPPED |
CPUSTAT_GED);
+ kvm_s390_vcpu_setup_model(vcpu);
+
vcpu->arch.sie_block->ecb = 6;
if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
@@ -1147,8 +1325,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp_has_sigpif())
vcpu->arch.sie_block->eca |= 0x10000000U;
- vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
- ICTL_TPROT;
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ vcpu->arch.sie_block->eca |= 0x00020000;
+ vcpu->arch.sie_block->ecd |= 0x20000000;
+ }
+ vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (kvm_s390_cmma_enabled(vcpu->kvm)) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -1158,11 +1339,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
- mutex_lock(&vcpu->kvm->lock);
- vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
- vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
- mutex_unlock(&vcpu->kvm->lock);
-
kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
@@ -1190,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+ vcpu->arch.host_vregs = &sie_page->vregs;
vcpu->arch.sie_block->icpua = id;
if (!kvm_is_ucontrol(kvm)) {
@@ -1205,7 +1382,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
- vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1725,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
return 0;
}
+static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+{
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ u8 opcode;
+ int rc;
+
+ VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+ trace_kvm_s390_sie_fault(vcpu);
+
+ /*
+ * We want to inject an addressing exception, which is defined as a
+ * suppressing or terminating exception. However, since we came here
+ * by a DAT access exception, the PSW still points to the faulting
+ * instruction since DAT exceptions are nullifying. So we've got
+ * to look up the current opcode to get the length of the instruction
+ * to be able to forward the PSW.
+ */
+ rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
+
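The rewind works because s390 encodes the instruction length in the two most significant bits of the first opcode byte (00: 2 bytes, 01/10: 4 bytes, 11: 6 bytes), so reading a single byte at the PSW address is enough. The arch helper used above is expected to boil down to roughly:

/* sketch of insn_length(), matching the architectural length code:
 * opcode 0x00-0x3f -> 2, 0x40-0xbf -> 4, 0xc0-0xff -> 6 bytes */
static inline int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

Passing the negated length to __rewind_psw() therefore steps the PSW forward past the faulting instruction, so the injected addressing exception looks suppressing from the guest's point of view.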
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
int rc = -1;
@@ -1756,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
}
}
- if (rc == -1) {
- VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
- trace_kvm_s390_sie_fault(vcpu);
- rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
+ if (rc == -1)
+ rc = vcpu_post_run_fault_in_sie(vcpu);
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
@@ -1976,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
+/*
+ * store additional status at address
+ */
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long gpa)
+{
+ /* Only bits 0-53 are used for address formation */
+ if (!(gpa & ~0x3ff))
+ return 0;
+
+ return write_guest_abs(vcpu, gpa & ~0x3ff,
+ (void *)&vcpu->run->s.regs.vrs, 512);
+}
+
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ if (!test_kvm_facility(vcpu->kvm, 129))
+ return 0;
+
+ /*
+ * The guest VXRS are in the host VXRs due to the lazy
+ * copying in vcpu load/put. Let's update our copies before we save
+ * it into the save area.
+ */
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+
+ return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
+}
+
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -2100,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mem_op *mop)
+{
+ void __user *uaddr = (void __user *)mop->buf;
+ void *tmpbuf = NULL;
+ int r, srcu_idx;
+ const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
+ | KVM_S390_MEMOP_F_CHECK_ONLY;
+
+ if (mop->flags & ~supported_flags)
+ return -EINVAL;
+
+ if (mop->size > MEM_OP_MAX_SIZE)
+ return -E2BIG;
+
+ if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
+ tmpbuf = vmalloc(mop->size);
+ if (!tmpbuf)
+ return -ENOMEM;
+ }
+
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ switch (mop->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+ break;
+ }
+ r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ if (r == 0) {
+ if (copy_to_user(uaddr, tmpbuf, mop->size))
+ r = -EFAULT;
+ }
+ break;
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+ break;
+ }
+ if (copy_from_user(tmpbuf, uaddr, mop->size)) {
+ r = -EFAULT;
+ break;
+ }
+ r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+
+ if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
+ kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+
+ vfree(tmpbuf);
+ return r;
+}
+
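A hypothetical userspace caller for the new KVM_S390_MEM_OP vcpu ioctl shown above, reading through the guest's logical address space (field names follow the handler; the error convention is assumed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read 'size' bytes (up to 64k) from guest logical address 'gaddr',
 * using access register 'ar' for AR-mode address spaces. */
static int guest_logical_read(int vcpu_fd, uint64_t gaddr, uint8_t ar,
			      void *buf, uint32_t size)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size = size,
		.op = KVM_S390_MEMOP_LOGICAL_READ,
		.buf = (uint64_t)(unsigned long)buf,
		.ar = ar,
	};

	/* a positive return is a program interruption code (access error);
	 * setting KVM_S390_MEMOP_F_CHECK_ONLY in .flags only checks
	 * accessibility without copying any data */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}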
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2109,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
long r;
switch (ioctl) {
+ case KVM_S390_IRQ: {
+ struct kvm_s390_irq s390irq;
+
+ r = -EFAULT;
+ if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
+ break;
+ r = kvm_s390_inject_vcpu(vcpu, &s390irq);
+ break;
+ }
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
@@ -2199,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
+ case KVM_S390_MEM_OP: {
+ struct kvm_s390_mem_op mem_op;
+
+ if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
+ r = kvm_s390_guest_mem_op(vcpu, &mem_op);
+ else
+ r = -EFAULT;
+ break;
+ }
+ case KVM_S390_SET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len > VCPU_IRQS_MAX_BUF ||
+ irq_state.len == 0 ||
+ irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_set_irq_state(vcpu,
+ (void __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
+ case KVM_S390_GET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len == 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_get_irq_state(vcpu,
+ (__u8 __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
default:
r = -ENOTTY;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c34109a..ca108b9 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+typedef u8 __bitwise ar_t;
+
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
- u64 *address1, u64 *address2)
+ u64 *address1, u64 *address2,
+ ar_t *ar_b1, ar_t *ar_b2)
{
u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+
+ if (ar_b1)
+ *ar_b1 = base1;
+ if (ar_b2)
+ *ar_b2 = base2;
}
static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
if (disp2 & 0x80000)
disp2+=0xfff00000;
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
-/* test availability of facility in a kvm intance */
+/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
return __test_facility(nr, kvm->arch.model.fac->mask) &&
__test_facility(nr, kvm->arch.model.fac->list);
}
+static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
+{
+ unsigned char *ptr;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return -EINVAL;
+ ptr = (unsigned char *) fac_list + (nr >> 3);
+ *ptr |= (0x80UL >> (nr & 7));
+ return 0;
+}
+
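The facility list is bit-numbered big-endian: facility nr lives in byte nr/8 under mask 0x80 >> (nr % 8), so facility 129 (the vector facility enabled via the new capability above) lands in byte 16 as 0x40. A hypothetical read-side counterpart, mirroring what __test_facility() is expected to do:

static inline int test_fac_bit(const u64 *fac_list, unsigned long nr)
{
	const unsigned char *ptr = (const unsigned char *) fac_list;

	if (nr >= MAX_FACILITY_BIT)
		return 0;
	return (ptr[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}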
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -150,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
- struct kvm_s390_interrupt_info *inti);
+ u64 isc_mask, u32 schid);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
/* implemented in intercept.c */
@@ -177,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -241,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+ void __user *buf, int len);
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
+ __u8 __user *buf, int len);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 3511169..d22d8ee 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
struct kvm_vcpu *cpup;
s64 hostclk, val;
int i, rc;
+ ar_t ar;
u64 op2;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- op2 = kvm_s390_get_base_disp_s(vcpu);
+ op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_spx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* get the value */
- rc = read_guest(vcpu, operand2, &address, sizeof(address));
+ rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stpx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
address = kvm_s390_get_prefix(vcpu);
/* get the value */
- rc = write_guest(vcpu, operand2, &address, sizeof(address));
+ rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stap++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_s(vcpu);
+ ga = kvm_s390_get_base_disp_s(vcpu, &ar);
if (ga & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
+ rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
addr = kvm_s390_logical_to_effective(vcpu, addr);
- if (kvm_s390_check_low_addr_protection(vcpu, addr))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);
@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
- int cc, rc;
+ int rc;
u64 addr;
+ ar_t ar;
- rc = 0;
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- cc = 0;
+
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
- if (!inti)
- goto no_interrupt;
- cc = 1;
+ if (!inti) {
+ kvm_s390_set_psw_cc(vcpu, 0);
+ return 0;
+ }
+
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
* provided area.
*/
len = sizeof(tpi_data) - 4;
- rc = write_guest(vcpu, addr, &tpi_data, len);
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
+ rc = write_guest(vcpu, addr, ar, &tpi_data, len);
+ if (rc) {
+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
+ goto reinject_interrupt;
+ }
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+ /* failed writes to the low core are not recoverable */
rc = -EFAULT;
+ goto reinject_interrupt;
+ }
}
+
+ /* irq was successfully handed to the guest */
+ kfree(inti);
+ kvm_s390_set_psw_cc(vcpu, 1);
+ return 0;
+reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
- if (!rc)
+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
- else
- kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
- /* Set condition code and we're done. */
- if (!rc)
- kvm_s390_set_psw_cc(vcpu, cc);
+ rc = -EFAULT;
+ }
+ /* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}
static int handle_tsch(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ const u64 isc_mask = 0xffUL << 24; /* all iscs set */
- inti = kvm_s390_get_io_int(vcpu->kvm, 0,
- vcpu->run->s.regs.gprs[1]);
+ /* a valid schid has at least one bit set */
+ if (vcpu->run->s.regs.gprs[1])
+ inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
+ vcpu->run->s.regs.gprs[1]);
/*
* Prepare exit to userspace.
@@ -386,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
psw_compat_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -412,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
psw_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gpsw = new_psw;
@@ -433,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
u64 stidp_data = vcpu->arch.stidp_data;
u64 operand2;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stidp++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
+ rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -467,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
@@ -478,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
ASCEBC(mem->vm[0].cpi, 16);
}
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+ u8 fc, u8 sel1, u16 sel2)
+{
+ vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
+ vcpu->run->s390_stsi.addr = addr;
+ vcpu->run->s390_stsi.ar = ar;
+ vcpu->run->s390_stsi.fc = fc;
+ vcpu->run->s390_stsi.sel1 = sel1;
+ vcpu->run->s390_stsi.sel2 = sel2;
+}
+
static int handle_stsi(struct kvm_vcpu *vcpu)
{
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
@@ -486,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
unsigned long mem = 0;
u64 operand2;
int rc = 0;
+ ar_t ar;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -508,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
return 0;
}
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 0xfff)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -532,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
break;
}
- rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
+ rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
}
+ if (vcpu->kvm->arch.user_stsi) {
+ insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
+ rc = -EREMOTE;
+ }
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
- return 0;
+ return rc;
out_no_data:
kvm_s390_set_psw_cc(vcpu, 3);
out:
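With KVM_CAP_S390_USER_STSI enabled, the -EREMOTE path above exits to userspace with KVM_EXIT_S390_STSI after the kernel has already written the sysinfo block, so the VMM can amend it in place at run->s390_stsi.addr. A hypothetical VMM-side handler (patch_stsi_3_2_2() is an assumed helper; filtering on fc/sel is illustrative):

/* after KVM_RUN returns */
static void handle_stsi_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_S390_STSI)
		return;
	/* function code and selectors identify which block was stored */
	if (run->s390_stsi.fc == 3 && run->s390_stsi.sel1 == 2 &&
	    run->s390_stsi.sel2 == 2)
		patch_stsi_3_2_2(run->s390_stsi.addr, run->s390_stsi.ar);
}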
@@ -670,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_protection(vcpu, start))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, start))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
@@ -776,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -791,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -814,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -836,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -847,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctlg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -862,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -884,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -906,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -931,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;
+ ar_t ar;
vcpu->stat.instruction_tprot++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+ kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
/* we only handle the Linux memory detection case:
* access key == 0
@@ -946,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_lock(vcpu);
- ret = guest_translate_address(vcpu, address1, &gpa, 1);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
if (ret == PGM_PROTECTION) {
/* Write protected? Try again with read-only... */
cc = 1;
- ret = guest_translate_address(vcpu, address1, &gpa, 0);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
}
if (ret) {
if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 23b1e86..72e58bd 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
break;
+ case SIGP_STORE_ADDITIONAL_STATUS:
+ vcpu->stat.instruction_sigp_store_adtl_status++;
+ break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
break;
@@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- order_code = kvm_s390_get_base_disp_rs(vcpu);
+ order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (handle_sigp_order_in_user_space(vcpu, order_code))
return -EOPNOTSUPP;
@@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
- u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+ u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a236e39..dea2e7e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -81,11 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
-#define SELECTOR_TI_MASK (1 << 2)
-#define SELECTOR_RPL_MASK 0x03
-
-#define IOPL_SHIFT 12
-
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
@@ -345,6 +340,7 @@ struct kvm_pmu {
enum {
KVM_DEBUGREG_BP_ENABLED = 1,
KVM_DEBUGREG_WONT_EXIT = 2,
+ KVM_DEBUGREG_RELOAD = 4,
};
struct kvm_vcpu_arch {
@@ -431,6 +427,9 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+ int maxphyaddr;
+
/* emulate context */
struct x86_emulate_ctxt emulate_ctxt;
@@ -550,11 +549,20 @@ struct kvm_arch_memory_slot {
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};
+/*
+ * We use as the mode the number of bits allocated in the LDR for the
+ * logical processor ID. It happens that these are all powers of two.
+ * This makes it very easy to detect cases where the APICs are
+ * configured for multiple modes; in that case, we cannot use the map and
+ * hence cannot use kvm_irq_delivery_to_apic_fast either.
+ */
+#define KVM_APIC_MODE_XAPIC_CLUSTER 4
+#define KVM_APIC_MODE_XAPIC_FLAT 8
+#define KVM_APIC_MODE_X2APIC 16
+
struct kvm_apic_map {
struct rcu_head rcu;
- u8 ldr_bits;
- /* fields bellow are used to decode ldr values in different modes */
- u32 cid_shift, cid_mask, lid_mask, broadcast;
+ u8 mode;
struct kvm_lapic *phys_map[256];
/* first index is cluster id second is cpu id in a cluster */
struct kvm_lapic *logical_map[16][16];
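
Because each mode value doubles as the number of LDR bits it uses, OR-ing two different modes together can never yield a power of two, which is how a mixed-mode (and therefore unusable) logical map is detected later in lapic.c. A standalone sketch of that check, reusing the constants above:

#include <stdbool.h>
#include <stdio.h>

#define KVM_APIC_MODE_XAPIC_CLUSTER 4
#define KVM_APIC_MODE_XAPIC_FLAT    8
#define KVM_APIC_MODE_X2APIC        16

/* Same test as kvm_apic_logical_map_valid(): a single mode (or no mode
 * seen yet, mode == 0) passes, any mixture of modes fails. */
static bool logical_map_valid(unsigned int mode)
{
        return !(mode & (mode - 1));
}

int main(void)
{
        printf("%d\n", logical_map_valid(KVM_APIC_MODE_XAPIC_FLAT));   /* 1 */
        printf("%d\n", logical_map_valid(KVM_APIC_MODE_XAPIC_FLAT |
                                         KVM_APIC_MODE_X2APIC));       /* 0 */
        return 0;
}
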
@@ -859,6 +867,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
@@ -933,6 +943,7 @@ struct x86_emulate_ctxt;
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -1128,7 +1139,6 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index e62cf89..c1adf33 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void)
static inline bool kvm_para_available(void)
{
- return 0;
+ return false;
}
static inline unsigned int kvm_arch_para_features(void)
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d6b078e..25b1cc0 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
+ u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index c5f1a1d..1fe9218 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -67,6 +67,7 @@
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
+#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d2..e5ecd20 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
+
+static struct pvclock_vsyscall_time_info *
+pvclock_get_vsyscall_user_time_info(int cpu)
+{
+ if (!pvclock_vdso_info) {
+ BUG();
+ return NULL;
+ }
+
+ return &pvclock_vdso_info[cpu];
+}
+
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
+{
+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
+}
+
#ifdef CONFIG_X86_64
+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
+ void *v)
+{
+ struct task_migration_notifier *mn = v;
+ struct pvclock_vsyscall_time_info *pvti;
+
+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
+
+ /* this is NULL when pvclock vsyscall is not initialized */
+ if (unlikely(pvti == NULL))
+ return NOTIFY_DONE;
+
+ pvti->migrate_count++;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pvclock_migrate = {
+ .notifier_call = pvclock_task_migrate,
+};
+
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
+ pvclock_vdso_info = i;
+
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
+
+ register_task_migration_notifier(&pvclock_migrate);
+
return 0;
}
#endif
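
The new migrate_count gives a lockless reader a way to notice that its task moved to another CPU between loads of the per-CPU pvti record, in addition to the existing version check. A sketch of the retry pattern this enables, using a stand-in structure rather than the real pvclock layout; the actual consumer lives in the vdso and is not shown in this hunk:

#include <stdint.h>

struct pvti_stub {                       /* stand-in, not the real layout */
        volatile uint32_t version;       /* odd while the host updates it */
        volatile uint64_t system_time;
};

struct pvti_vsyscall_stub {
        struct pvti_stub pvti;
        volatile uint32_t migrate_count; /* bumped by the notifier above */
};

/* Retry if the host updated the record mid-read (version odd or changed)
 * or the task migrated mid-read (migrate_count changed), since the loads
 * could otherwise come from different CPUs' records. */
uint64_t pvclock_read_stub(const struct pvti_vsyscall_stub *p)
{
        uint32_t ver, mig;
        uint64_t t;

        do {
                mig = p->migrate_count;
                ver = p->pvti.version;
                t   = p->pvti.system_time;
        } while ((ver & 1) || ver != p->pvti.version ||
                 mig != p->migrate_count);

        return t;
}
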
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 08f790d..16e8f96 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
-ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm
CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8a80737..59b69f6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -104,6 +104,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
((best->eax & 0xff00) >> 8) != 0)
return -EINVAL;
+ /* Update physical-address width */
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
kvm_pmu_cpuid_update(vcpu);
return 0;
}
@@ -135,6 +138,21 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
}
}
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best)
+ return best->eax & 0xff;
+not_found:
+ return 36;
+}
+EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
+
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
@@ -757,21 +775,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
- if (!best || best->eax < 0x80000008)
- goto not_found;
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- if (best)
- return best->eax & 0xff;
-not_found:
- return 36;
-}
-EXPORT_SYMBOL_GPL(cpuid_maxphyaddr);
-
/*
* If no match is found, check whether we exceed the vCPU's limit
* and return the content of the highest valid _standard_ leaf instead.
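
cpuid_query_maxphyaddr() follows the standard CPUID convention: leaf 0x80000008 reports the physical-address width in EAX[7:0], with 36 bits as the fallback when that leaf is absent, and the result is now computed once per CPUID update and cached in vcpu->arch.maxphyaddr. The same convention can be checked on a host with a few lines of ordinary C:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x80000008, EAX[7:0] is the physical-address width; fall
         * back to 36 if the leaf is not implemented. */
        if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
                printf("MAXPHYADDR: %u bits\n", eax & 0xff);
        else
                printf("MAXPHYADDR: assuming 36 bits\n");
        return 0;
}
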
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 4452eed..c3b1ad9 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -20,13 +20,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
+
+static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.maxphyaddr;
+}
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
if (!static_cpu_has(X86_FEATURE_XSAVE))
- return 0;
+ return false;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 106c015..630bcb0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -248,27 +248,7 @@ struct mode_dual {
struct opcode mode64;
};
-/* EFLAGS bit definitions. */
-#define EFLG_ID (1<<21)
-#define EFLG_VIP (1<<20)
-#define EFLG_VIF (1<<19)
-#define EFLG_AC (1<<18)
-#define EFLG_VM (1<<17)
-#define EFLG_RF (1<<16)
-#define EFLG_IOPL (3<<12)
-#define EFLG_NT (1<<14)
-#define EFLG_OF (1<<11)
-#define EFLG_DF (1<<10)
-#define EFLG_IF (1<<9)
-#define EFLG_TF (1<<8)
-#define EFLG_SF (1<<7)
-#define EFLG_ZF (1<<6)
-#define EFLG_AF (1<<4)
-#define EFLG_PF (1<<2)
-#define EFLG_CF (1<<0)
-
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
-#define EFLG_RESERVED_ONE_MASK 2
enum x86_transfer_type {
X86_TRANSFER_NONE,
@@ -317,7 +297,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
-#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
+#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
+ X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
@@ -478,6 +459,25 @@ static void assign_masked(ulong *dest, ulong src, ulong mask)
*dest = (*dest & ~mask) | (src & mask);
}
+static void assign_register(unsigned long *reg, u64 val, int bytes)
+{
+ /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+ switch (bytes) {
+ case 1:
+ *(u8 *)reg = (u8)val;
+ break;
+ case 2:
+ *(u16 *)reg = (u16)val;
+ break;
+ case 4:
+ *reg = (u32)val;
+ break; /* 64b: zero-extend */
+ case 8:
+ *reg = val;
+ break;
+ }
+}
+
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
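
assign_register() centralizes the x86 register write-back width rules: 8- and 16-bit stores only touch the low part of the register, while a 32-bit store deliberately writes the whole register, matching the zero-extension real 64-bit CPUs perform. A standalone illustration of those rules, assuming a little-endian host as on x86:

#include <stdint.h>
#include <stdio.h>

/* Same width rules as assign_register(), applied to a plain uint64_t. */
static void assign(uint64_t *reg, uint64_t val, int bytes)
{
        switch (bytes) {
        case 1: *(uint8_t *)reg = (uint8_t)val;   break; /* low byte only */
        case 2: *(uint16_t *)reg = (uint16_t)val; break; /* low word only */
        case 4: *reg = (uint32_t)val;             break; /* zero-extends  */
        case 8: *reg = val;                       break;
        }
}

int main(void)
{
        uint64_t r = 0xffffffffffffffffULL;

        assign(&r, 0x1234, 2);
        printf("%#llx\n", (unsigned long long)r); /* 0xffffffffffff1234 */
        assign(&r, 0x5678, 4);
        printf("%#llx\n", (unsigned long long)r); /* 0x5678: upper half cleared */
        return 0;
}
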
@@ -943,6 +943,22 @@ FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
+static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
+{
+ /* If src is zero, do not writeback, but update flags */
+ if (ctxt->src.val == 0)
+ ctxt->dst.type = OP_NONE;
+ return fastop(ctxt, em_bsf);
+}
+
+static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
+{
+ /* If src is zero, do not writeback, but update flags */
+ if (ctxt->src.val == 0)
+ ctxt->dst.type = OP_NONE;
+ return fastop(ctxt, em_bsr);
+}
+
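
The two wrappers exist because BSF/BSR with a zero source only set ZF and leave the destination register alone on real hardware (Intel documents the destination as undefined in that case), so the emulator has to suppress the write-back instead of storing a stale result. A toy model of that decision, with the bit scan done by a compiler builtin:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_op {
        uint64_t dst;
        bool writeback;  /* false plays the role of ctxt->dst.type = OP_NONE */
        bool zf;
};

static void toy_bsf(struct toy_op *op, uint64_t src)
{
        if (src == 0) {
                op->writeback = false;   /* keep the old destination value */
                op->zf = true;
        } else {
                op->dst = __builtin_ctzll(src);
                op->writeback = true;
                op->zf = false;
        }
}

int main(void)
{
        struct toy_op op = { .dst = 0xdead };

        toy_bsf(&op, 0);
        printf("src=0:    dst stays %#llx, ZF=%d\n",
               (unsigned long long)op.dst, op.zf);
        toy_bsf(&op, 0x40);
        printf("src=0x40: dst=%llu, ZF=%d\n",
               (unsigned long long)op.dst, op.zf);
        return 0;
}
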
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
@@ -1399,7 +1415,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
- in_page = (ctxt->eflags & EFLG_DF) ?
+ in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
@@ -1412,7 +1428,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
- !(ctxt->eflags & EFLG_DF)) {
+ !(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
@@ -1691,21 +1707,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
static void write_register_operand(struct operand *op)
{
- /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
- switch (op->bytes) {
- case 1:
- *(u8 *)op->addr.reg = (u8)op->val;
- break;
- case 2:
- *(u16 *)op->addr.reg = (u16)op->val;
- break;
- case 4:
- *op->addr.reg = (u32)op->val;
- break; /* 64b: zero-extend */
- case 8:
- *op->addr.reg = op->val;
- break;
- }
+ return assign_register(op->addr.reg, op->val, op->bytes);
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
@@ -1792,32 +1794,34 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
{
int rc;
unsigned long val, change_mask;
- int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
- | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
+ change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
+ X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
+ X86_EFLAGS_AC | X86_EFLAGS_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
- change_mask |= EFLG_IOPL;
+ change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
- change_mask |= EFLG_IF;
+ change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
- change_mask |= EFLG_IF;
+ change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
- change_mask |= (EFLG_IOPL | EFLG_IF);
+ change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
@@ -1918,7 +1922,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
+ ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
@@ -1926,6 +1930,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
+ u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
@@ -1933,9 +1938,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
--reg;
}
- rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
+ rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
+ assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
@@ -1956,7 +1962,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
if (rc != X86EMUL_CONTINUE)
return rc;
- ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
+ ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
@@ -2022,10 +2028,14 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
- unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
- EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
- EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
- unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
+ unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
+ X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
+ X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
+ X86_EFLAGS_AC | X86_EFLAGS_ID |
+ X86_EFLAGS_FIXED;
+ unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
+ X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
@@ -2054,7 +2064,6 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
ctxt->_eip = temp_eip;
-
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
@@ -2063,7 +2072,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
- ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
@@ -2145,12 +2154,12 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
- ctxt->eflags &= ~EFLG_ZF;
+ ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
- ctxt->eflags |= EFLG_ZF;
+ ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
@@ -2222,7 +2231,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
- if (ctxt->eflags & EFLG_ZF) {
+ if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
@@ -2381,14 +2390,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
- ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
@@ -2425,8 +2434,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
- cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
+ cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
@@ -2493,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
return emulate_gp(ctxt, 0);
break;
}
- cs_sel |= SELECTOR_RPL_MASK;
- ss_sel |= SELECTOR_RPL_MASK;
+ cs_sel |= SEGMENT_RPL_MASK;
+ ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
@@ -2512,7 +2521,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
- iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
@@ -2782,10 +2791,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
- return X86EMUL_CONTINUE;
+ return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
@@ -2954,7 +2961,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
- int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
+ int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
@@ -3323,7 +3330,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_vmcall(struct x86_emulate_ctxt *ctxt)
+static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
@@ -3395,17 +3402,6 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
return em_lgdt_lidt(ctxt, true);
}
-static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
-{
- int rc;
-
- rc = ctxt->ops->fix_hypercall(ctxt);
-
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return rc;
-}
-
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
@@ -3504,7 +3500,8 @@ static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
- flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
+ flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
@@ -3769,7 +3766,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
static const struct opcode group7_rm0[] = {
N,
- I(SrcNone | Priv | EmulateOnUD, em_vmcall),
+ I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
@@ -3781,7 +3778,7 @@ static const struct opcode group7_rm1[] = {
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
- II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
+ II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
@@ -4192,7 +4189,8 @@ static const struct opcode twobyte_table[256] = {
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
- F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
+ I(DstReg | SrcMem | ModRM, em_bsf_c),
+ I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
@@ -4759,9 +4757,9 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == 0))
+ ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
+ ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
@@ -4913,7 +4911,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
ctxt->eip = ctxt->_eip;
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
@@ -4963,9 +4961,9 @@ special_insn:
}
if (ctxt->rep_prefix && (ctxt->d & String))
- ctxt->eflags |= EFLG_RF;
+ ctxt->eflags |= X86_EFLAGS_RF;
else
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
@@ -5014,7 +5012,7 @@ special_insn:
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
- if (ctxt->eflags & EFLG_OF)
+ if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
@@ -5027,19 +5025,19 @@ special_insn:
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
- ctxt->eflags ^= EFLG_CF;
+ ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
- ctxt->eflags &= ~EFLG_CF;
+ ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
- ctxt->eflags |= EFLG_CF;
+ ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
- ctxt->eflags &= ~EFLG_DF;
+ ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
- ctxt->eflags |= EFLG_DF;
+ ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
@@ -5100,7 +5098,7 @@ writeback:
}
goto done; /* skip rip writeback */
}
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
@@ -5137,8 +5135,7 @@ twobyte_insn:
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
- else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
- ctxt->op_bytes != 4)
+ else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 298781d..4dce6f8 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -443,7 +443,8 @@ static inline int pit_in_range(gpa_t addr)
(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}
-static int pit_ioport_write(struct kvm_io_device *this,
+static int pit_ioport_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
@@ -519,7 +520,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
return 0;
}
-static int pit_ioport_read(struct kvm_io_device *this,
+static int pit_ioport_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
@@ -589,7 +591,8 @@ static int pit_ioport_read(struct kvm_io_device *this,
return 0;
}
-static int speaker_ioport_write(struct kvm_io_device *this,
+static int speaker_ioport_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
@@ -606,8 +609,9 @@ static int speaker_ioport_write(struct kvm_io_device *this,
return 0;
}
-static int speaker_ioport_read(struct kvm_io_device *this,
- gpa_t addr, int len, void *data)
+static int speaker_ioport_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index dd1b16b..c84990b 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -3,7 +3,7 @@
#include <linux/kthread.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
struct kvm_kpit_channel_state {
u32 count; /* can be 65536 */
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 9541ba3..fef922f 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -529,42 +529,42 @@ static int picdev_read(struct kvm_pic *s,
return 0;
}
-static int picdev_master_write(struct kvm_io_device *dev,
+static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_master),
addr, len, val);
}
-static int picdev_master_read(struct kvm_io_device *dev,
+static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_master),
addr, len, val);
}
-static int picdev_slave_write(struct kvm_io_device *dev,
+static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
addr, len, val);
}
-static int picdev_slave_read(struct kvm_io_device *dev,
+static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
addr, len, val);
}
-static int picdev_eclr_write(struct kvm_io_device *dev,
+static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
addr, len, val);
}
-static int picdev_eclr_read(struct kvm_io_device *dev,
+static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 46d4449..28146f0 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -206,6 +206,8 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
old_irr = ioapic->irr;
ioapic->irr |= mask;
+ if (edge)
+ ioapic->irr_delivered &= ~mask;
if ((edge && old_irr == ioapic->irr) ||
(!edge && entry.fields.remote_irr)) {
ret = 0;
@@ -349,7 +351,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
irqe.shorthand = 0;
if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
- ioapic->irr &= ~(1 << irq);
+ ioapic->irr_delivered |= 1 << irq;
if (irq == RTC_GSI && line_status) {
/*
@@ -473,13 +475,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
}
}
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- return test_bit(vector, ioapic->handled_vectors);
-}
-
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
@@ -500,8 +495,8 @@ static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
(addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
-static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
- void *val)
+static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ gpa_t addr, int len, void *val)
{
struct kvm_ioapic *ioapic = to_ioapic(this);
u32 result;
@@ -543,8 +538,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
return 0;
}
-static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
- const void *val)
+static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ gpa_t addr, int len, const void *val)
{
struct kvm_ioapic *ioapic = to_ioapic(this);
u32 data;
@@ -599,6 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
ioapic->ioregsel = 0;
ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
ioapic->id = 0;
memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
rtc_irq_eoi_tracking_reset(ioapic);
@@ -656,6 +652,7 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
+ state->irr &= ~ioapic->irr_delivered;
spin_unlock(&ioapic->lock);
return 0;
}
@@ -669,6 +666,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
update_handled_vectors(ioapic);
kvm_vcpu_request_scan_ioapic(kvm);
kvm_ioapic_inject_all(ioapic, state->irr);
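
Previously an edge-triggered interrupt cleared its IRR bit as soon as it was delivered, so KVM_GET_IOAPIC could not tell a delivered edge from one that was never raised; the new irr_delivered mask keeps the delivery information separate and is only subtracted when the state is read or reloaded. A toy model of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

/* irr keeps the raw pin state, irr_delivered remembers which edge
 * interrupts already reached a CPU, and the state reported to userspace
 * is irr with those bits masked off. */
struct toy_ioapic { uint32_t irr, irr_delivered; };

static void edge_raise(struct toy_ioapic *io, int pin)
{
        io->irr |= 1u << pin;
        io->irr_delivered &= ~(1u << pin);   /* new edge, not delivered yet */
}

static void edge_deliver(struct toy_ioapic *io, int pin)
{
        io->irr_delivered |= 1u << pin;      /* instead of clearing irr */
}

static uint32_t saved_irr(const struct toy_ioapic *io)
{
        return io->irr & ~io->irr_delivered; /* what KVM_GET_IOAPIC reports */
}

int main(void)
{
        struct toy_ioapic io = { 0, 0 };

        edge_raise(&io, 3);
        edge_deliver(&io, 3);
        printf("saved irr = %#x\n", saved_irr(&io)); /* 0: not re-injected */
        return 0;
}
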
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index c2e36d9..ca0b0b4 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -3,7 +3,7 @@
#include <linux/kvm_host.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
struct kvm;
struct kvm_vcpu;
@@ -77,6 +77,7 @@ struct kvm_ioapic {
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
u32 irq_eoi[IOAPIC_NUM_PINS];
+ u32 irr_delivered;
};
#ifdef DEBUG
@@ -97,13 +98,19 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
return kvm->arch.vioapic;
}
+static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ return test_bit(vector, ioapic->handled_vectors);
+}
+
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
int trigger_mode);
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 2d03568..ad68c73 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -27,7 +27,7 @@
#include <linux/kvm_host.h>
#include <linux/spinlock.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#include "ioapic.h"
#include "lapic.h"
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4ee827d..d67206a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -133,6 +133,28 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
+/* The logical map is definitely wrong if we have multiple
+ * modes at the same time. (Physical map is always right.)
+ */
+static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
+{
+ return !(map->mode & (map->mode - 1));
+}
+
+static inline void
+apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
+{
+ unsigned lid_bits;
+
+ BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
+ BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
+ BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
+ lid_bits = map->mode;
+
+ *cid = dest_id >> lid_bits;
+ *lid = dest_id & ((1 << lid_bits) - 1);
+}
+
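
Since mode is also the logical-ID width, apic_logical_id() reduces to a shift and a mask. A worked example with the xAPIC cluster width (mode == 4) and a hypothetical dest_id of 0x23:

#include <stdio.h>

int main(void)
{
        unsigned int mode = 4;          /* KVM_APIC_MODE_XAPIC_CLUSTER */
        unsigned int dest_id = 0x23;    /* hypothetical value */
        unsigned int cid = dest_id >> mode;
        unsigned int lid = dest_id & ((1u << mode) - 1);

        /* cluster 2, in-cluster bitmap 0x3, i.e. logical CPUs 0 and 1 */
        printf("cid=%u lid=%#x\n", cid, lid);
        return 0;
}
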
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -146,48 +168,6 @@ static void recalculate_apic_map(struct kvm *kvm)
if (!new)
goto out;
- new->ldr_bits = 8;
- /* flat mode is default */
- new->cid_shift = 8;
- new->cid_mask = 0;
- new->lid_mask = 0xff;
- new->broadcast = APIC_BROADCAST;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (apic_x2apic_mode(apic)) {
- new->ldr_bits = 32;
- new->cid_shift = 16;
- new->cid_mask = new->lid_mask = 0xffff;
- new->broadcast = X2APIC_BROADCAST;
- } else if (kvm_apic_get_reg(apic, APIC_LDR)) {
- if (kvm_apic_get_reg(apic, APIC_DFR) ==
- APIC_DFR_CLUSTER) {
- new->cid_shift = 4;
- new->cid_mask = 0xf;
- new->lid_mask = 0xf;
- } else {
- new->cid_shift = 8;
- new->cid_mask = 0;
- new->lid_mask = 0xff;
- }
- }
-
- /*
- * All APICs have to be configured in the same mode by an OS.
- * We take advatage of this while building logical id loockup
- * table. After reset APICs are in software disabled mode, so if
- * we find apic with different setting we assume this is the mode
- * OS wants all apics to be in; build lookup table accordingly.
- */
- if (kvm_apic_sw_enabled(apic))
- break;
- }
-
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
@@ -198,11 +178,25 @@ static void recalculate_apic_map(struct kvm *kvm)
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
- cid = apic_cluster_id(new, ldr);
- lid = apic_logical_id(new, ldr);
if (aid < ARRAY_SIZE(new->phys_map))
new->phys_map[aid] = apic;
+
+ if (apic_x2apic_mode(apic)) {
+ new->mode |= KVM_APIC_MODE_X2APIC;
+ } else if (ldr) {
+ ldr = GET_APIC_LOGICAL_ID(ldr);
+ if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
+ new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
+ else
+ new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
+ }
+
+ if (!kvm_apic_logical_map_valid(new))
+ continue;
+
+ apic_logical_id(new, ldr, &cid, &lid);
+
if (lid && cid < ARRAY_SIZE(new->logical_map))
new->logical_map[cid][ffs(lid) - 1] = apic;
}
@@ -588,15 +582,23 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}
-static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
- return dest == (apic_x2apic_mode(apic) ?
- X2APIC_BROADCAST : APIC_BROADCAST);
+ if (apic_x2apic_mode(apic))
+ return mda == X2APIC_BROADCAST;
+
+ return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}
-static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
- return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
+ if (kvm_apic_broadcast(apic, mda))
+ return true;
+
+ if (apic_x2apic_mode(apic))
+ return mda == kvm_apic_id(apic);
+
+ return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
@@ -613,6 +615,7 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
&& (logical_id & mda & 0xffff) != 0;
logical_id = GET_APIC_LOGICAL_ID(logical_id);
+ mda = GET_APIC_DEST_FIELD(mda);
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
@@ -627,10 +630,27 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
}
}
+/* KVM APIC implementation has two quirks
+ * - dest always begins at 0 while xAPIC MDA has offset 24,
+ * - IOxAPIC messages have to be delivered (directly) to x2APIC.
+ */
+static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
+ struct kvm_lapic *target)
+{
+ bool ipi = source != NULL;
+ bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
+
+ if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
+ return X2APIC_BROADCAST;
+
+ return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
+}
+
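
A few worked numbers for the MDA fix-up above, assuming the usual xAPIC register layout where SET_APIC_DEST_FIELD(x) is x << 24 and X2APIC_BROADCAST is 0xffffffff; the helper below mirrors kvm_apic_mda() with the x2APIC decision passed in directly:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_BROADCAST          0xff
#define X2APIC_BROADCAST        0xffffffffu
#define SET_APIC_DEST_FIELD(x)  ((x) << 24)  /* xAPIC MDA sits in bits 31:24 */

/* x2APIC uses the id as-is, xAPIC shifts it into the destination field,
 * and an xAPIC-style broadcast aimed at an x2APIC target is widened to
 * the x2APIC broadcast id. */
static uint32_t toy_mda(uint32_t dest_id, bool ipi, bool x2apic_mda)
{
        if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
                return X2APIC_BROADCAST;
        return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
}

int main(void)
{
        printf("%#x\n", toy_mda(0x03, true, false));       /* 0x3000000  */
        printf("%#x\n", toy_mda(0x00010003, true, true));  /* 0x10003    */
        printf("%#x\n", toy_mda(0xff, false, true));       /* 0xffffffff */
        return 0;
}
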
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode)
{
struct kvm_lapic *target = vcpu->arch.apic;
+ u32 mda = kvm_apic_mda(dest, source, target);
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
@@ -640,9 +660,9 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == APIC_DEST_PHYSICAL)
- return kvm_apic_match_physical_addr(target, dest);
+ return kvm_apic_match_physical_addr(target, mda);
else
- return kvm_apic_match_logical_addr(target, dest);
+ return kvm_apic_match_logical_addr(target, mda);
case APIC_DEST_SELF:
return target == source;
case APIC_DEST_ALLINC:
@@ -664,6 +684,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic **dst;
int i;
bool ret = false;
+ bool x2apic_ipi = src && apic_x2apic_mode(src);
*r = -1;
@@ -675,15 +696,15 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (irq->shorthand)
return false;
+ if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
+ return false;
+
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
if (!map)
goto out;
- if (irq->dest_id == map->broadcast)
- goto out;
-
ret = true;
if (irq->dest_mode == APIC_DEST_PHYSICAL) {
@@ -692,16 +713,20 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
dst = &map->phys_map[irq->dest_id];
} else {
- u32 mda = irq->dest_id << (32 - map->ldr_bits);
- u16 cid = apic_cluster_id(map, mda);
+ u16 cid;
+
+ if (!kvm_apic_logical_map_valid(map)) {
+ ret = false;
+ goto out;
+ }
+
+ apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
if (cid >= ARRAY_SIZE(map->logical_map))
goto out;
dst = map->logical_map[cid];
- bitmap = apic_logical_id(map, mda);
-
if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
@@ -1037,7 +1062,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
-static int apic_mmio_read(struct kvm_io_device *this,
+static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1357,7 +1382,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
return ret;
}
-static int apic_mmio_write(struct kvm_io_device *this,
+static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1497,8 +1522,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
/* update jump label if enable bit changes */
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 0bc6c65..9d28383 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -1,7 +1,7 @@
#ifndef __KVM_X86_LAPIC_H
#define __KVM_X86_LAPIC_H
-#include "iodev.h"
+#include <kvm/iodev.h>
#include <linux/kvm_host.h>
@@ -148,21 +148,6 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
return kvm_x86_ops->vm_has_apicv(kvm);
}
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
-{
- u16 cid;
- ldr >>= 32 - map->ldr_bits;
- cid = (ldr >> map->cid_shift) & map->cid_mask;
-
- return cid;
-}
-
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
- ldr >>= (32 - map->ldr_bits);
- return ldr & map->lid_mask;
-}
-
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic->pending_events;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cee7592..146f295 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4465,6 +4465,79 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
kvm_flush_remote_tlbs(kvm);
}
+static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
+ unsigned long *rmapp)
+{
+ u64 *sptep;
+ struct rmap_iterator iter;
+ int need_tlb_flush = 0;
+ pfn_t pfn;
+ struct kvm_mmu_page *sp;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ sp = page_header(__pa(sptep));
+ pfn = spte_to_pfn(*sptep);
+
+ /*
+ * Only EPT supported for now; otherwise, one would need to
+ * find out efficiently whether the guest page tables are
+ * also using huge pages.
+ */
+ if (sp->role.direct &&
+ !kvm_is_reserved_pfn(pfn) &&
+ PageTransCompound(pfn_to_page(pfn))) {
+ drop_spte(kvm, sptep);
+ sptep = rmap_get_first(*rmapp, &iter);
+ need_tlb_flush = 1;
+ } else
+ sptep = rmap_get_next(&iter);
+ }
+
+ return need_tlb_flush;
+}
+
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ bool flush = false;
+ unsigned long *rmapp;
+ unsigned long last_index, index;
+ gfn_t gfn_start, gfn_end;
+
+ spin_lock(&kvm->mmu_lock);
+
+ gfn_start = memslot->base_gfn;
+ gfn_end = memslot->base_gfn + memslot->npages - 1;
+
+ if (gfn_start >= gfn_end)
+ goto out;
+
+ rmapp = memslot->arch.rmap[0];
+ last_index = gfn_to_index(gfn_end, memslot->base_gfn,
+ PT_PAGE_TABLE_LEVEL);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (flush) {
+ kvm_flush_remote_tlbs(kvm);
+ flush = false;
+ }
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+}
+
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
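
After dirty logging is switched off, the small sptes created for write tracking may now sit inside a transparent huge page, so dropping them lets the next fault install a large mapping again. The walk restarts from the head of the rmap chain whenever it drops the current entry, because the removal invalidates the iterator; a toy version of that walk-and-drop pattern on a plain linked list:

#include <stdbool.h>
#include <stdlib.h>

struct ent {
        struct ent *next;
        bool collapsible;
};

/* Toy version of kvm_mmu_zap_collapsible_spte()'s loop: deleting the
 * current entry invalidates the iterator, so the walk restarts from the
 * head (rmap_get_first) instead of advancing (rmap_get_next). */
bool zap_collapsible(struct ent **head)
{
        struct ent **prev = head, *e = *head;
        bool dropped = false;

        while (e) {
                if (e->collapsible) {
                        *prev = e->next;        /* drop_spte() analogue */
                        free(e);
                        prev = head;            /* restart from the head */
                        e = *head;
                        dropped = true;
                } else {
                        prev = &e->next;
                        e = e->next;
                }
        }
        return dropped;         /* caller flushes TLBs if anything was dropped */
}
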
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8e6b7d8..29fbf9d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -38,7 +38,7 @@ static struct kvm_arch_event_perf_mapping {
};
/* mapping between fixed pmc index and arch_events array */
-int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {1, 0, 7};
static bool pmc_is_gp(struct kvm_pmc *pmc)
{
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cc618c8..ce741b8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1261,7 +1261,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_bsp(&svm->vcpu))
+ if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
svm_init_osvw(&svm->vcpu);
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
static int halt_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
- skip_emulated_instruction(&svm->vcpu);
return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
kvm_emulate_hypercall(&svm->vcpu);
return 1;
}
@@ -2757,11 +2755,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
- vcpu->arch.regs[VCPU_REGS_RAX]);
+ trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
+ kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
- kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
+ kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
@@ -2770,12 +2768,18 @@ static int invlpga_interception(struct vcpu_svm *svm)
static int skinit_interception(struct vcpu_svm *svm)
{
- trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
+ trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
+static int wbinvd_interception(struct vcpu_svm *svm)
+{
+ kvm_emulate_wbinvd(&svm->vcpu);
+ return 1;
+}
+
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
@@ -2902,7 +2906,8 @@ static int rdpmc_interception(struct vcpu_svm *svm)
return 1;
}
-bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
+static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
+ unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false;
@@ -2940,7 +2945,10 @@ static int cr_interception(struct vcpu_svm *svm)
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
- cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+ if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
+ cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
+ else
+ cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
err = 0;
if (cr >= 16) { /* mov to cr */
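
The CR number is normally recovered from the exit code itself, since the SVM exit codes for CR accesses are laid out contiguously; SVM_EXIT_CR0_SEL_WRITE sits outside that range, so it is folded into the ordinary write-to-CR0 case first. A sketch of the decode, assuming the usual exit-code layout (reads 0x00-0x0f, writes 0x10-0x1f, CR0_SEL_WRITE 0x65):

#include <stdio.h>

#define SVM_EXIT_READ_CR0       0x000
#define SVM_EXIT_WRITE_CR0      0x010
#define SVM_EXIT_WRITE_CR4      0x014
#define SVM_EXIT_CR0_SEL_WRITE  0x065

/* Mirrors the decode above: selective CR0 writes are treated as a plain
 * write to CR0, everything else is (exit_code - READ_CR0), where values
 * of 16 and up mean "mov to cr" for CR (value - 16). */
static int exit_code_to_cr(int exit_code)
{
        if (exit_code == SVM_EXIT_CR0_SEL_WRITE)
                return SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
        return exit_code - SVM_EXIT_READ_CR0;
}

int main(void)
{
        printf("%d\n", exit_code_to_cr(SVM_EXIT_WRITE_CR4));     /* 20 -> write CR4 */
        printf("%d\n", exit_code_to_cr(SVM_EXIT_CR0_SEL_WRITE)); /* 16 -> write CR0 */
        return 0;
}
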
@@ -3133,7 +3141,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
static int rdmsr_interception(struct vcpu_svm *svm)
{
- u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
u64 data;
if (svm_get_msr(&svm->vcpu, ecx, &data)) {
@@ -3142,8 +3150,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
} else {
trace_kvm_msr_read(ecx, data);
- svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
- svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
+ kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
+ kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
@@ -3246,9 +3254,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
- u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
- | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u64 data = kvm_read_edx_eax(&svm->vcpu);
msr.data = data;
msr.index = ecx;
@@ -3325,7 +3332,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
[SVM_EXIT_READ_CR8] = cr_interception,
- [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
+ [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
[SVM_EXIT_WRITE_CR0] = cr_interception,
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
@@ -3376,7 +3383,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
- [SVM_EXIT_WBINVD] = emulate_on_interception,
+ [SVM_EXIT_WBINVD] = wbinvd_interception,
[SVM_EXIT_MONITOR] = monitor_interception,
[SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
@@ -3555,7 +3562,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) {
- WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
+ WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ae4f6d3..f5e8dce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2470,6 +2470,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
@@ -3268,8 +3269,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
* default value.
*/
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
- save->selector &= ~SELECTOR_RPL_MASK;
- save->dpl = save->selector & SELECTOR_RPL_MASK;
+ save->selector &= ~SEGMENT_RPL_MASK;
+ save->dpl = save->selector & SEGMENT_RPL_MASK;
save->s = 1;
}
vmx_set_segment(vcpu, save, seg);
@@ -3842,7 +3843,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
unsigned int cs_rpl;
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- cs_rpl = cs.selector & SELECTOR_RPL_MASK;
+ cs_rpl = cs.selector & SEGMENT_RPL_MASK;
if (cs.unusable)
return false;
@@ -3870,7 +3871,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu)
unsigned int ss_rpl;
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- ss_rpl = ss.selector & SELECTOR_RPL_MASK;
+ ss_rpl = ss.selector & SEGMENT_RPL_MASK;
if (ss.unusable)
return true;
@@ -3892,7 +3893,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
unsigned int rpl;
vmx_get_segment(vcpu, &var, seg);
- rpl = var.selector & SELECTOR_RPL_MASK;
+ rpl = var.selector & SEGMENT_RPL_MASK;
if (var.unusable)
return true;
@@ -3919,7 +3920,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu)
if (tr.unusable)
return false;
- if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
+ if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
return false;
@@ -3937,7 +3938,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu)
if (ldtr.unusable)
return true;
- if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
+ if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (ldtr.type != 2)
return false;
@@ -3954,8 +3955,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- return ((cs.selector & SELECTOR_RPL_MASK) ==
- (ss.selector & SELECTOR_RPL_MASK));
+ return ((cs.selector & SEGMENT_RPL_MASK) ==
+ (ss.selector & SEGMENT_RPL_MASK));
}
/*
@@ -4711,7 +4712,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_bsp(&vmx->vcpu))
+ if (kvm_vcpu_is_reset_bsp(&vmx->vcpu))
apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
apic_base_msr.host_initiated = true;
kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
@@ -5006,7 +5007,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- return kvm_emulate_halt(vcpu);
+ return kvm_vcpu_halt(vcpu);
}
return 1;
}
@@ -5071,6 +5072,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
}
if (is_invalid_opcode(intr_info)) {
+ if (is_guest_mode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5090,9 +5095,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
- vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
+ vcpu->run->internal.data[2] = error_code;
return 0;
}
@@ -5533,13 +5539,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
static int handle_halt(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
kvm_emulate_hypercall(vcpu);
return 1;
}
@@ -5570,7 +5574,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
kvm_emulate_wbinvd(vcpu);
return 1;
}
@@ -5828,7 +5831,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
+ if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
skip_emulated_instruction(vcpu);
return 1;
}
@@ -5909,7 +5912,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- ret = kvm_emulate_halt(vcpu);
+ ret = kvm_vcpu_halt(vcpu);
goto out;
}
@@ -7318,21 +7321,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b;
else
- return 1;
+ return true;
bitmap += (port & 0x7fff) / 8;
if (last_bitmap != bitmap)
if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
- return 1;
+ return true;
if (b & (1 << (port & 7)))
- return 1;
+ return true;
port++;
size--;
last_bitmap = bitmap;
}
- return 0;
+ return false;
}
/*
@@ -7348,7 +7351,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
- return 1;
+ return true;
/*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
@@ -7367,10 +7370,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
if (msr_index < 1024*8) {
unsigned char b;
if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
- return 1;
+ return true;
return 1 & (b >> (msr_index & 7));
} else
- return 1; /* let L1 handle the wrong parameter */
+ return true; /* let L1 handle the wrong parameter */
}
/*
@@ -7392,7 +7395,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
case 0:
if (vmcs12->cr0_guest_host_mask &
(val ^ vmcs12->cr0_read_shadow))
- return 1;
+ return true;
break;
case 3:
if ((vmcs12->cr3_target_count >= 1 &&
@@ -7403,37 +7406,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
vmcs12->cr3_target_value2 == val) ||
(vmcs12->cr3_target_count >= 4 &&
vmcs12->cr3_target_value3 == val))
- return 0;
+ return false;
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
- return 1;
+ return true;
break;
case 4:
if (vmcs12->cr4_guest_host_mask &
(vmcs12->cr4_read_shadow ^ val))
- return 1;
+ return true;
break;
case 8:
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
- return 1;
+ return true;
break;
}
break;
case 2: /* clts */
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
(vmcs12->cr0_read_shadow & X86_CR0_TS))
- return 1;
+ return true;
break;
case 1: /* mov from cr */
switch (cr) {
case 3:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR3_STORE_EXITING)
- return 1;
+ return true;
break;
case 8:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR8_STORE_EXITING)
- return 1;
+ return true;
break;
}
break;
@@ -7444,14 +7447,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
*/
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
- return 1;
+ return true;
if ((vmcs12->cr0_guest_host_mask & 0x1) &&
!(vmcs12->cr0_read_shadow & 0x1) &&
(val & 0x1))
- return 1;
+ return true;
break;
}
- return 0;
+ return false;
}
/*
@@ -7474,48 +7477,48 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
KVM_ISA_VMX);
if (vmx->nested.nested_run_pending)
- return 0;
+ return false;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
- return 1;
+ return true;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
- return 0;
+ return false;
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
- return 0;
+ return false;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
- return 0;
+ return false;
case EXIT_REASON_TRIPLE_FAULT:
- return 1;
+ return true;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
- return 1;
+ return true;
case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
- return 0;
- return 1;
+ return false;
+ return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
- return 1;
+ return true;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
- case EXIT_REASON_RDTSC:
+ case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
@@ -7527,7 +7530,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
- return 1;
+ return true;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
@@ -7538,7 +7541,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
- return 1;
+ return true;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
@@ -7548,7 +7551,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
- return 0;
+ return false;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS:
@@ -7557,7 +7560,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_APIC_WRITE:
case EXIT_REASON_EOI_INDUCED:
/* apic_write and eoi_induced should exit unconditionally. */
- return 1;
+ return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
@@ -7565,7 +7568,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
- return 0;
+ return false;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses L1's EPT directly, but rather L0's own EPT
@@ -7573,11 +7576,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* (EPT on EPT). So any problems with the structure of the
* table are L0's fault.
*/
- return 0;
+ return false;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
- return 1;
+ return true;
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
/*
* This should never happen, since it is not possible to
@@ -7587,7 +7590,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default:
- return 1;
+ return true;
}
}
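/*
 * For context, a paraphrase (not verbatim) of how vmx_handle_exit() consumes
 * the boolean result above: "true" means L1 asked to intercept this event, so
 * the exit is reflected into L1; "false" means L0 handles it itself.
 */
	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
		nested_vmx_vmexit(vcpu, exit_reason,
				  vmcs_read32(VM_EXIT_INTR_INFO),
				  vmcs_readl(EXIT_QUALIFICATION));
		return 1;
	}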
@@ -8522,6 +8525,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
exec_control);
}
}
+ if (nested && !vmx->rdtscp_enabled)
+ vmx->nested.nested_vmx_secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDTSCP;
}
/* Exposing INVPCID only when PCID is exposed */
@@ -8622,10 +8628,11 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
- /* TODO: Also verify bits beyond physical address width are 0 */
- if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
+ if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
+ vmcs12->apic_access_addr >> maxphyaddr)
return false;
/*
@@ -8641,8 +8648,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- /* TODO: Also verify bits beyond physical address width are 0 */
- if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr))
+ if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
+ vmcs12->virtual_apic_page_addr >> maxphyaddr)
return false;
if (vmx->nested.virtual_apic_page) /* shouldn't happen */
@@ -8665,7 +8672,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}
if (nested_cpu_has_posted_intr(vmcs12)) {
- if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64))
+ if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
+ vmcs12->posted_intr_desc_addr >> maxphyaddr)
return false;
if (vmx->nested.pi_desc_page) { /* shouldn't happen */
@@ -8864,9 +8872,9 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
unsigned long count_field,
- unsigned long addr_field,
- int maxphyaddr)
+ unsigned long addr_field)
{
+ int maxphyaddr;
u64 count, addr;
if (vmcs12_read_any(vcpu, count_field, &count) ||
@@ -8876,6 +8884,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
}
if (count == 0)
return 0;
+ maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
(addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
pr_warn_ratelimited(
@@ -8889,19 +8898,16 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- int maxphyaddr;
-
if (vmcs12->vm_exit_msr_load_count == 0 &&
vmcs12->vm_exit_msr_store_count == 0 &&
vmcs12->vm_entry_msr_load_count == 0)
return 0; /* Fast path */
- maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
- VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+ VM_EXIT_MSR_LOAD_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
- VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+ VM_EXIT_MSR_STORE_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
- VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+ VM_ENTRY_MSR_LOAD_ADDR))
return -EINVAL;
return 0;
}
@@ -9151,8 +9157,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control &= ~SECONDARY_EXEC_RDTSCP;
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_APIC_REGISTER_VIRT);
+ SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -9385,7 +9392,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
}
if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
- /*TODO: Also verify bits beyond physical address width are 0*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
@@ -9524,7 +9530,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmcs12->launch_state = 1;
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
- return kvm_emulate_halt(vcpu);
+ return kvm_vcpu_halt(vcpu);
vmx->nested.nested_run_pending = 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32bf19e..2b2dd03 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -801,6 +801,17 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+ for (i = 0; i < KVM_NR_DB_REGS; i++)
+ vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+}
+
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
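/*
 * Context for kvm_update_dr0123() above: vcpu->arch.eff_db[] only diverges
 * from the guest's db[] while userspace has hardware breakpoints installed
 * via KVM_SET_GUEST_DEBUG.  A minimal userspace sketch of installing one
 * (vcpu_fd and addr are placeholders; DR7 bits beyond enabling breakpoint 0
 * are omitted):
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_hw_breakpoint(int vcpu_fd, unsigned long addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = addr;	/* DR0: address to break on */
	dbg.arch.debugreg[7] = 0x1;	/* DR7: local-enable breakpoint 0 */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}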
@@ -3149,6 +3160,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = dbgregs->dr7;
@@ -4114,8 +4126,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
- !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
- && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+ !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
+ && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
@@ -4134,8 +4146,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
- !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
- && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+ !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
+ addr, n, v))
+ && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
@@ -4475,7 +4488,8 @@ mmio:
return X86EMUL_CONTINUE;
}
-int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
@@ -4538,7 +4552,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
exception, &read_emultor);
}
-int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
@@ -4629,10 +4643,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;
if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+ r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
- r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
return r;
@@ -4705,7 +4719,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
@@ -4722,19 +4736,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
wbinvd();
return X86EMUL_CONTINUE;
}
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_emulate_wbinvd_noskip(vcpu);
+}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
+
+
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
- kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+ kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
+static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long *dest)
{
return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
+static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long value)
{
return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
@@ -5816,7 +5840,7 @@ void kvm_arch_exit(void)
free_percpu(shared_msrs);
}
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5827,6 +5851,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
return 0;
}
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_vcpu_halt(vcpu);
+}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5903,7 +5934,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
lapic_irq.dest_id = apicid;
lapic_irq.delivery_mode = APIC_DM_REMRD;
- kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
+ kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
@@ -5911,6 +5942,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int op_64_bit, r = 1;
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
@@ -6164,7 +6197,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
}
/*
- * Returns 1 to let __vcpu_run() continue the guest execution loop without
+ * Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -6301,6 +6334,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
+ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -6382,42 +6416,47 @@ out:
return r;
}
+static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (!kvm_arch_vcpu_runnable(vcpu)) {
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_block(vcpu);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
+ return 1;
+ }
+
+ kvm_apic_accept_events(vcpu);
+ switch(vcpu->arch.mp_state) {
+ case KVM_MP_STATE_HALTED:
+ vcpu->arch.pv.pv_unhalted = false;
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.apf.halted = false;
+ break;
+ case KVM_MP_STATE_INIT_RECEIVED:
+ break;
+ default:
+ return -EINTR;
+ break;
+ }
+ return 1;
+}
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- r = 1;
- while (r > 0) {
+ for (;;) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
- else {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- kvm_vcpu_block(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- kvm_apic_accept_events(vcpu);
- switch(vcpu->arch.mp_state) {
- case KVM_MP_STATE_HALTED:
- vcpu->arch.pv.pv_unhalted = false;
- vcpu->arch.mp_state =
- KVM_MP_STATE_RUNNABLE;
- case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.apf.halted = false;
- break;
- case KVM_MP_STATE_INIT_RECEIVED:
- break;
- default:
- r = -EINTR;
- break;
- }
- }
- }
-
+ else
+ r = vcpu_block(kvm, vcpu);
if (r <= 0)
break;
@@ -6429,6 +6468,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
+ break;
}
kvm_check_async_pf_completion(vcpu);
@@ -6437,6 +6477,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
+ break;
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -6568,7 +6609,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
- r = __vcpu_run(vcpu);
+ r = vcpu_run(vcpu);
out:
post_kvm_run_save(vcpu);
@@ -7075,11 +7116,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_clear_exception_queue(vcpu);
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
+ kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = DR6_INIT;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
+ vcpu->arch.cr2 = 0;
+
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
@@ -7240,7 +7284,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
- if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
+ if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -7288,6 +7332,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 = 0;
vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
@@ -7428,7 +7474,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
- kvm_kvfree(free->arch.rmap[i]);
+ kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
@@ -7436,7 +7482,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
- kvm_kvfree(free->arch.lpage_info[i - 1]);
+ kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
@@ -7490,12 +7536,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvm_kvfree(slot->arch.rmap[i]);
+ kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
if (i == 0)
continue;
- kvm_kvfree(slot->arch.lpage_info[i - 1]);
+ kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
@@ -7618,6 +7664,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
new = id_to_memslot(kvm->memslots, mem->slot);
/*
+ * Dirty logging tracks sptes in 4k granularity, meaning that large
+ * sptes have to be split. If live migration is successful, the guest
+ * in the source machine will be destroyed and large sptes will be
+ * created in the destination. However, if the guest continues to run
+ * in the source machine (for example if live migration fails), small
+ * sptes will remain around and cause bad performance.
+ *
+ * Scan sptes if dirty logging has been stopped, dropping those
+ * which can be collapsed into a single large-page spte. Later
+ * page faults will create the large-page sptes.
+ */
+ if ((change != KVM_MR_DELETE) &&
+ (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+ !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ kvm_mmu_zap_collapsible_sptes(kvm, new);
+
+ /*
* Set up write protection and/or dirty logging for the new slot.
*
* For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
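/*
 * The kvm_mmu_zap_collapsible_sptes() call added above runs on the transition
 * that turns a slot's dirty logging off.  A userspace sketch of driving that
 * transition (slot id, addresses and size are placeholders): enable logging
 * when migration starts, clear it again if migration is abandoned.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_slot_dirty_logging(int vm_fd, __u64 guest_phys, __u64 size,
				  void *host_va, int enable)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,			/* placeholder slot id */
		.flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0,
		.guest_phys_addr = guest_phys,
		.memory_size = size,
		.userspace_addr = (__u64)(unsigned long)host_va,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}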
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 9793322..40d2473 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
+ u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;
/*
- * Note: hypervisor must guarantee that:
- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
- * 2. that per-CPU pvclock time info is updated if the
- * underlying CPU changes.
- * 3. that version is increased whenever underlying CPU
- * changes.
- *
+ * When looping to get a consistent (time-info, tsc) pair, we
+ * also need to deal with the possibility that we switch vcpus,
+ * so make sure we always re-fetch time-info for the current vcpu.
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/
- pvti = get_pvti(cpu);
+ /* Make sure migrate_count will change if we leave the VCPU. */
+ do {
+ pvti = get_pvti(cpu);
+ migrate_count = pvti->migrate_count;
+
+ cpu1 = cpu;
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1));
version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
/*
* Test we're still on the cpu as well as the version.
- * We could have been migrated just after the first
- * vgetcpu but before fetching the version, so we
- * wouldn't notice a version change.
+ * - We must read TSC of pvti's VCPU.
+ * - KVM doesn't follow the versioning protocol, so the data could
+ *   change before the version does if we have left the VCPU.
*/
- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
- } while (unlikely(cpu != cpu1 ||
- (pvti->pvti.version & 1) ||
- pvti->pvti.version != version));
+ smp_rmb();
+ } while (unlikely((pvti->pvti.version & 1) ||
+ pvti->pvti.version != version ||
+ pvti->migrate_count != migrate_count));
if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
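/*
 * For reference, the (time-info, tsc) pair that the loop above reads
 * consistently is combined by __pvclock_read_cycles() roughly as below
 * (standalone sketch of the pvclock scaling; the kernel performs the
 * 64x32-bit multiply with 96-bit intermediate precision, which this
 * simplified version does not):
 */
#include <stdint.h>

static uint64_t pvclock_nsec(uint64_t tsc, uint64_t tsc_timestamp,
			     uint64_t system_time, uint32_t tsc_to_system_mul,
			     int8_t tsc_shift)
{
	uint64_t delta = tsc - tsc_timestamp;

	if (tsc_shift >= 0)
		delta <<= tsc_shift;
	else
		delta >>= -tsc_shift;

	/* 32.32 fixed point: nanoseconds elapsed since system_time was sampled */
	return system_time + ((delta * tsc_to_system_mul) >> 32);
}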