From 7f662273e476e2d7ff44f411fa9f17c946480100 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 10 Dec 2012 11:42:30 +0200 Subject: KVM: emulator: implement AAD instruction Windows2000 uses it during boot. This fixes https://bugzilla.kernel.org/show_bug.cgi?id=50921 Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a27e763..47d62e1 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2852,6 +2852,27 @@ static int em_das(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_aad(struct x86_emulate_ctxt *ctxt) +{ + u8 al = ctxt->dst.val & 0xff; + u8 ah = (ctxt->dst.val >> 8) & 0xff; + + al = (al + (ah * ctxt->src.val)) & 0xff; + + ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; + + ctxt->eflags &= ~(X86_EFLAGS_PF | X86_EFLAGS_SF | X86_EFLAGS_ZF); + + if (!al) + ctxt->eflags |= X86_EFLAGS_ZF; + if (!(al & 1)) + ctxt->eflags |= X86_EFLAGS_PF; + if (al & 0x80) + ctxt->eflags |= X86_EFLAGS_SF; + + return X86EMUL_CONTINUE; +} + static int em_call(struct x86_emulate_ctxt *ctxt) { long rel = ctxt->src.val; @@ -3801,7 +3822,7 @@ static const struct opcode opcode_table[256] = { D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), - N, N, N, N, + N, I(DstAcc | SrcImmByte | No64, em_aad), N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ -- cgit v0.10.2 From 5e2c688351f4aee9981918661b6c1679f4155f06 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Thu, 6 Dec 2012 21:55:10 -0200 Subject: KVM: x86: fix mov immediate emulation for 64-bit operands MOV immediate instruction (opcodes 0xB8-0xBF) may take 64-bit operand. The previous emulation implementation assumes the operand is no longer than 32. Adding OpImm64 for this matter. Fixes https://bugzilla.redhat.com/show_bug.cgi?id=881579 Signed-off-by: Nadav Amit Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 47d62e1..c7547b3 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -43,7 +43,7 @@ #define OpCL 9ull /* CL register (for shifts) */ #define OpImmByte 10ull /* 8-bit sign extended immediate */ #define OpOne 11ull /* Implied 1 */ -#define OpImm 12ull /* Sign extended immediate */ +#define OpImm 12ull /* Sign extended up to 32-bit immediate */ #define OpMem16 13ull /* Memory operand (16-bit). */ #define OpMem32 14ull /* Memory operand (32-bit). 
*/ #define OpImmU 15ull /* Immediate operand, zero extended */ @@ -58,6 +58,7 @@ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ +#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) @@ -101,6 +102,7 @@ #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) +#define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcMask (OpMask << SrcShift) @@ -3807,7 +3809,7 @@ static const struct opcode opcode_table[256] = { /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ - X8(I(DstReg | SrcImm | Mov, em_mov)), + X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ D2bv(DstMem | SrcImmByte | ModRM), I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), @@ -3971,6 +3973,9 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, case 4: op->val = insn_fetch(s32, ctxt); break; + case 8: + op->val = insn_fetch(s64, ctxt); + break; } if (!sign_extension) { switch (op->bytes) { @@ -4049,6 +4054,9 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; + case OpImm64: + rc = decode_imm(ctxt, op, ctxt->op_bytes, true); + break; case OpMem8: ctxt->memop.bytes = 1; goto mem_common; -- cgit v0.10.2 From f3200d00ea42e485772ff92d6d649aa8eeb640c0 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 10 Dec 2012 14:05:55 +0200 Subject: KVM: inject ExtINT interrupt before APIC interrupts According to Intel SDM Volume 3 Section 10.8.1 "Interrupt Handling with the Pentium 4 and Intel Xeon Processors" and Section 10.8.2 "Interrupt Handling with the P6 Family and Pentium Processors" ExtINT interrupts are sent directly to the processor core for handling. Currently KVM checks APIC before it considers ExtINT interrupts for injection which is backwards from the spec. Make code behave according to the SDM. 
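In shorthand, the delivery order after this change can be paraphrased as the following simplified sketch (not the literal kernel code; the helpers are the ones used in arch/x86/kvm/irq.c below):

/*
 * Simplified paraphrase of kvm_cpu_get_interrupt() after this patch:
 * a pending ExtINT on the 8259 PIC is delivered before any local APIC
 * interrupt, matching SDM 3A sections 10.8.1 and 10.8.2.
 */
static int injection_order_sketch(struct kvm_vcpu *v)
{
	if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
		return kvm_pic_read_irq(v->kvm);	/* ExtINT via PIC */

	return kvm_get_apic_interrupt(v);		/* local APIC */
}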
Signed-off-by: Gleb Natapov Acked-by: "Zhang, Yang Z" Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 848206d..cc31f7c 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -241,6 +241,8 @@ int kvm_pic_read_irq(struct kvm *kvm) int irq, irq2, intno; struct kvm_pic *s = pic_irqchip(kvm); + s->output = 0; + pic_lock(s); irq = pic_get_irq(&s->pics[0]); if (irq >= 0) { diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 7e06ba1..ebd98d0 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -48,14 +48,10 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v) if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.pending; - if (kvm_apic_has_interrupt(v) == -1) { /* LAPIC */ - if (kvm_apic_accept_pic_intr(v)) { - s = pic_irqchip(v->kvm); /* PIC */ - return s->output; - } else - return 0; - } - return 1; + if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output) + return pic_irqchip(v->kvm)->output; /* PIC */ + + return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ } EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); @@ -65,20 +61,14 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); int kvm_cpu_get_interrupt(struct kvm_vcpu *v) { struct kvm_pic *s; - int vector; if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.nr; - vector = kvm_get_apic_interrupt(v); /* APIC */ - if (vector == -1) { - if (kvm_apic_accept_pic_intr(v)) { - s = pic_irqchip(v->kvm); - s->output = 0; /* PIC */ - vector = kvm_pic_read_irq(v->kvm); - } - } - return vector; + if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output) + return kvm_pic_read_irq(v->kvm); /* PIC */ + + return kvm_get_apic_interrupt(v); /* APIC */ } EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt); -- cgit v0.10.2 From f0736cf0550b349a5d5a374d65ca0488cc2eee40 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:32:45 -0700 Subject: KVM: Restrict non-existing slot state transitions The API documentation states: When changing an existing slot, it may be moved in the guest physical memory space, or its flags may be modified. An "existing slot" requires a non-zero npages (memory_size). The only transition we should therefore allow for a non-existing slot should be to create the slot, which includes setting a non-zero memory_size. We currently allow calls to modify non-existing slots, which is pointless, confusing, and possibly wrong. With this we know that the invalidation path of __kvm_set_memory_region is always for a delete or move and never for adding a zero size slot. Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1cd693a..3caf816 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -758,10 +758,15 @@ int __kvm_set_memory_region(struct kvm *kvm, new.npages = npages; new.flags = mem->flags; - /* Disallow changing a memory slot's size. */ + /* + * Disallow changing a memory slot's size or changing anything about + * zero sized slots that doesn't involve making them non-zero. 
+ */ r = -EINVAL; if (npages && old.npages && npages != old.npages) goto out_free; + if (!npages && !old.npages) + goto out_free; /* Check for overlaps */ r = -EEXIST; @@ -780,7 +785,7 @@ int __kvm_set_memory_region(struct kvm *kvm, r = -ENOMEM; /* Allocate if a slot is being created */ - if (npages && !old.npages) { + if (!old.npages) { new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; -- cgit v0.10.2 From 9c695d42dbd465bcaa865603b411a73c60e60978 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:32:51 -0700 Subject: KVM: Check userspace_addr when modifying a memory slot The API documents that only flags and guest physical memory space can be modified on an existing slot, but we don't enforce that the userspace address cannot be modified. Instead we just ignore it. This means that a user may think they've successfully moved both the guest and user addresses, when in fact only the guest address changed. Check and error instead. Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3caf816..e4d3581 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -784,13 +784,19 @@ int __kvm_set_memory_region(struct kvm *kvm, r = -ENOMEM; - /* Allocate if a slot is being created */ + /* + * Allocate if a slot is being created. If modifying a slot, + * the userspace_addr cannot change. + */ if (!old.npages) { new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; if (kvm_arch_create_memslot(&new, npages)) goto out_free; + } else if (npages && mem->userspace_addr != old.userspace_addr) { + r = -EINVAL; + goto out_free; } /* Allocate page dirty bitmap if needed */ -- cgit v0.10.2 From e40f193f5bb022e927a57a4f5d5194e4f12ddb74 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:32:57 -0700 Subject: KVM: Fix iommu map/unmap to handle memory slot moves The iommu integration into memory slots expects memory slots to be added or removed and doesn't handle the move case. We can unmap slots from the iommu after we mark them invalid and map them before installing the final memslot array. Also re-order the kmemdup vs map so we don't leave iommu mappings if we get ENOMEM. Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e4d3581..9a56ca2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -822,6 +822,8 @@ int __kvm_set_memory_region(struct kvm *kvm, old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); + /* slot was deleted or moved, clear iommu mapping */ + kvm_iommu_unmap_pages(kvm, &old); /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. 
* @@ -837,20 +839,19 @@ int __kvm_set_memory_region(struct kvm *kvm, if (r) goto out_free; - /* map/unmap the pages in iommu page table */ - if (npages) { - r = kvm_iommu_map_pages(kvm, &new); - if (r) - goto out_free; - } else - kvm_iommu_unmap_pages(kvm, &old); - r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; + /* map new memory slot into the iommu */ + if (npages) { + r = kvm_iommu_map_pages(kvm, &new); + if (r) + goto out_slots; + } + /* actual memory is freed via old in kvm_free_physmem_slot below */ if (!npages) { new.dirty_bitmap = NULL; @@ -869,6 +870,8 @@ int __kvm_set_memory_region(struct kvm *kvm, return 0; +out_slots: + kfree(slots); out_free: kvm_free_physmem_slot(&new, &old); out: -- cgit v0.10.2 From b7f69c555ca430129b6cde81e9f0927531420c5c Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:03 -0700 Subject: KVM: Minor memory slot optimization If a slot is removed or moved in the guest physical address space, we first allocate and install a new slot array with the invalidated entry. The old array is then freed. We then proceed to allocate yet another slot array to install the permanent replacement. Re-use the original array when this occurs and avoid the extra kfree/kmalloc. Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9a56ca2..4c8c065 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -716,7 +716,7 @@ int __kvm_set_memory_region(struct kvm *kvm, unsigned long npages; struct kvm_memory_slot *memslot, *slot; struct kvm_memory_slot old, new; - struct kvm_memslots *slots, *old_memslots; + struct kvm_memslots *slots = NULL, *old_memslots; r = check_memory_region_flags(mem); if (r) @@ -832,18 +832,25 @@ int __kvm_set_memory_region(struct kvm *kvm, * - kvm_is_visible_gfn (mmu_check_roots) */ kvm_arch_flush_shadow_memslot(kvm, slot); - kfree(old_memslots); + slots = old_memslots; } r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); if (r) - goto out_free; + goto out_slots; r = -ENOMEM; - slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), - GFP_KERNEL); - if (!slots) - goto out_free; + /* + * We can re-use the old_memslots from above, the only difference + * from the currently installed memslots is the invalid flag. This + * will get overwritten by update_memslots anyway. + */ + if (!slots) { + slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), + GFP_KERNEL); + if (!slots) + goto out_free; + } /* map new memory slot into the iommu */ if (npages) { -- cgit v0.10.2 From bbacc0c111c3c5d1f3192b8cc1642b9c3954f80d Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:09 -0700 Subject: KVM: Rename KVM_MEMORY_SLOTS -> KVM_USER_MEM_SLOTS It's easy to confuse KVM_MEMORY_SLOTS and KVM_MEM_SLOTS_NUM. One is the user accessible slots and the other is user + private. Make this more obvious. 
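At this point in the series, the relationship the rename is meant to make obvious is roughly (per the architecture headers touched below; the private-slot comment applies to x86):

#define KVM_USER_MEM_SLOTS	32	/* slots exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	4	/* internal slots, e.g. TSS, APIC access page, identity page table */
#define KVM_MEM_SLOTS_NUM	(KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)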
Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 6d6a5ac..48d7b0e 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -23,7 +23,7 @@ #ifndef __ASM_KVM_HOST_H #define __ASM_KVM_HOST_H -#define KVM_MEMORY_SLOTS 32 +#define KVM_USER_MEM_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index bd1c515..9bacfe2 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, mutex_lock(&kvm->slots_lock); r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) + if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index ca9bf45..ac19ad6 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -37,10 +37,10 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS -#define KVM_MEMORY_SLOTS 32 +#define KVM_USER_MEM_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 -#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 71d0c90..80dcc53 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1549,7 +1549,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) mutex_lock(&kvm->slots_lock); r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) + if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b784154..ac33432 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -20,7 +20,7 @@ #include #define KVM_MAX_VCPUS 64 -#define KVM_MEMORY_SLOTS 32 +#define KVM_USER_MEM_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index dc87b65..c7df6ff 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -33,10 +33,10 @@ #define KVM_MAX_VCPUS 254 #define KVM_SOFT_MAX_VCPUS 160 -#define KVM_MEMORY_SLOTS 32 +#define KVM_USER_MEM_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 -#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #define KVM_MMIO_SIZE 16 diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index c2d56b3..e385df9 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -427,9 +427,9 @@ enum vmcs_field { #define AR_RESERVD_MASK 0xfffe0f00 -#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0) -#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1) -#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2) +#define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0) +#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1) +#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 
2) #define VMX_NR_VPIDS (1 << 16) #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 76f5446..8160747 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2518,7 +2518,7 @@ int kvm_dev_ioctl_check_extension(long ext) r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: - r = KVM_MEMORY_SLOTS; + r = KVM_USER_MEM_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; @@ -3435,7 +3435,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) mutex_lock(&kvm->slots_lock); r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) + if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); @@ -6845,7 +6845,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; /* Prevent internal slot pages from being moved by fork()/COW. */ - if (memslot->id >= KVM_MEMORY_SLOTS) + if (memslot->id >= KVM_USER_MEM_SLOTS) map_flags = MAP_SHARED | MAP_ANONYMOUS; /*To keep backward compatibility with older userspace, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c497ab..abad7f3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -314,7 +314,7 @@ struct kvm_irq_routing_table {}; #endif #ifndef KVM_MEM_SLOTS_NUM -#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #endif /* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4c8c065..5f0638c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -771,7 +771,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* Check for overlaps */ r = -EEXIST; kvm_for_each_memslot(slot, kvm->memslots) { - if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot) + if (slot->id >= KVM_USER_MEM_SLOTS || slot == memslot) continue; if (!((base_gfn + npages <= slot->base_gfn) || (base_gfn >= slot->base_gfn + slot->npages))) @@ -905,7 +905,7 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, kvm_userspace_memory_region *mem, int user_alloc) { - if (mem->slot >= KVM_MEMORY_SLOTS) + if (mem->slot >= KVM_USER_MEM_SLOTS) return -EINVAL; return kvm_set_memory_region(kvm, mem, user_alloc); } @@ -919,7 +919,7 @@ int kvm_get_dirty_log(struct kvm *kvm, unsigned long any = 0; r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) + if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); @@ -965,7 +965,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); - if (!memslot || memslot->id >= KVM_MEMORY_SLOTS || + if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS || memslot->flags & KVM_MEMSLOT_INVALID) return 0; -- cgit v0.10.2 From 0743247fbf0c4a27185b2aa1fdda91d0745dfed1 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:15 -0700 Subject: KVM: Make KVM_PRIVATE_MEM_SLOTS optional Seems like everyone copied x86 and defined 4 private memory slots that never actually get used. Even x86 only uses 3 of the 4. These aren't exposed so there's no need to add padding. 
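With that, the generic header only needs a default for architectures that define no private slots; the pattern in include/linux/kvm_host.h below amounts to:

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif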
Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 48d7b0e..cfa7498 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -24,8 +24,6 @@ #define __ASM_KVM_HOST_H #define KVM_USER_MEM_SLOTS 32 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index ac19ad6..ab49c6c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -38,9 +38,7 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS #define KVM_USER_MEM_SLOTS 32 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 4 -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index ac33432..711c5ab 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -21,8 +21,6 @@ #define KVM_MAX_VCPUS 64 #define KVM_USER_MEM_SLOTS 32 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 4 struct sca_entry { atomic_t scn; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c7df6ff..51d5210 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -34,8 +34,8 @@ #define KVM_MAX_VCPUS 254 #define KVM_SOFT_MAX_VCPUS 160 #define KVM_USER_MEM_SLOTS 32 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 4 +/* memory slots that are not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 3 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #define KVM_MMIO_SIZE 16 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index abad7f3..5a3581c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -313,6 +313,10 @@ struct kvm_irq_routing_table {}; #endif +#ifndef KVM_PRIVATE_MEM_SLOTS +#define KVM_PRIVATE_MEM_SLOTS 0 +#endif + #ifndef KVM_MEM_SLOTS_NUM #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #endif -- cgit v0.10.2 From f82a8cfe9354f5cdea55ebeceba3fd19051d3ee8 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:21 -0700 Subject: KVM: struct kvm_memory_slot.user_alloc -> bool There's no need for this to be an int, it holds a boolean. Move to the end of the struct for alignment. 
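For context, the flag only records how a slot's backing memory was supplied, so callers now pass a bool literal; roughly, as seen in the hunks below:

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  bool user_alloc);

/* userspace-supplied memory, via the KVM_SET_USER_MEMORY_REGION ioctl path: */
kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);

/* kernel-internal slots, e.g. the VMX TSS/APIC-access/identity-map pages: */
__kvm_set_memory_region(kvm, &kvm_userspace_mem, false);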
Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 9bacfe2..ad3126a 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -955,7 +955,7 @@ long kvm_arch_vm_ioctl(struct file *filp, kvm_mem.guest_phys_addr; kvm_userspace_mem.memory_size = kvm_mem.memory_size; r = kvm_vm_ioctl_set_memory_region(kvm, - &kvm_userspace_mem, 0); + &kvm_userspace_mem, false); if (r) goto out; break; @@ -1580,7 +1580,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { unsigned long i; unsigned long pfn; @@ -1611,7 +1611,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc) + bool user_alloc) { return; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 70739a0..be83fca 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -412,7 +412,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { return kvmppc_core_prepare_memory_region(kvm, memslot, mem); } @@ -420,7 +420,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc) + bool user_alloc) { kvmppc_core_commit_memory_region(kvm, mem, old); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c9011bf..f718bc6 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -928,7 +928,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { /* A few sanity checks. 
We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a @@ -958,7 +958,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc) + bool user_alloc) { int rc; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9120ae1..b3101e3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3667,7 +3667,7 @@ static int alloc_apic_access_page(struct kvm *kvm) kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); + r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false); if (r) goto out; @@ -3697,7 +3697,7 @@ static int alloc_identity_pagetable(struct kvm *kvm) kvm_userspace_mem.guest_phys_addr = kvm->arch.ept_identity_map_addr; kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); + r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false); if (r) goto out; @@ -4251,7 +4251,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) .flags = 0, }; - ret = kvm_set_memory_region(kvm, &tss_mem, 0); + ret = kvm_set_memory_region(kvm, &tss_mem, false); if (ret) return ret; kvm->arch.tss_addr = addr; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8160747..1c9c834 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6839,7 +6839,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { int npages = memslot->npages; int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; @@ -6875,7 +6875,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc) + bool user_alloc) { int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5a3581c..d897f03 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -270,8 +270,8 @@ struct kvm_memory_slot { unsigned long *dirty_bitmap; struct kvm_arch_memory_slot arch; unsigned long userspace_addr; - int user_alloc; int id; + bool user_alloc; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) @@ -451,10 +451,10 @@ id_to_memslot(struct kvm_memslots *slots, int id) int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - int user_alloc); + bool user_alloc); int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - int user_alloc); + bool user_alloc); void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont); int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); @@ -462,11 +462,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc); + bool user_alloc); void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc); + bool user_alloc); bool kvm_largepages_enabled(void); void kvm_disable_largepages(void); /* flush all memory translations */ @@ -553,7 +553,7 @@ int 
kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - int user_alloc); + bool user_alloc); int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5f0638c..42c1eb73 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -709,7 +709,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) */ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { int r; gfn_t base_gfn; @@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(__kvm_set_memory_region); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { int r; @@ -903,7 +903,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region); int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { if (mem->slot >= KVM_USER_MEM_SLOTS) return -EINVAL; @@ -2148,7 +2148,7 @@ static long kvm_vm_ioctl(struct file *filp, sizeof kvm_userspace_mem)) goto out; - r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1); + r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true); break; } case KVM_GET_DIRTY_LOG: { -- cgit v0.10.2 From 6104f472a5ea287fbdcf4644e74867dfd905a018 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:26 -0700 Subject: KVM: struct kvm_memory_slot.flags -> u32 struct kvm_userspace_memory_region.flags is a u32 with a comment that bits 0 ~ 15 are visible to userspace and the other bits are reserved for kvm internal use. KVM_MEMSLOT_INVALID is the only internal use flag and it has a comment that bits 16 ~ 31 are internally used and the other bits are visible to userspace. Therefore, let's define this as a u32 so we don't waste bytes on LP64 systems. Move to the end of the struct for alignment. Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d897f03..fec6075 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -266,10 +266,10 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; - unsigned long flags; unsigned long *dirty_bitmap; struct kvm_arch_memory_slot arch; unsigned long userspace_addr; + u32 flags; int id; bool user_alloc; }; -- cgit v0.10.2 From 1e702d9af5d633cf0eca76f6340b3c50fbb5a4e5 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:32 -0700 Subject: KVM: struct kvm_memory_slot.id -> short We're currently offering a whopping 32 memory slots to user space, an int is a bit excessive for storing this. We would like to increase our memslots, but SHRT_MAX should be more than enough. 
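Taken together with the two preceding patches (user_alloc -> bool, flags -> u32), the slot descriptor ends up as roughly:

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	bool user_alloc;
};

The BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX) added to kvm_create_vm() below guards the narrowed id type.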
Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fec6075..32fdc45 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -270,7 +270,7 @@ struct kvm_memory_slot { struct kvm_arch_memory_slot arch; unsigned long userspace_addr; u32 flags; - int id; + short id; bool user_alloc; }; @@ -330,7 +330,7 @@ struct kvm_memslots { u64 generation; struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; /* The mapping table from slot id to the index in memslots[]. */ - int id_to_index[KVM_MEM_SLOTS_NUM]; + short id_to_index[KVM_MEM_SLOTS_NUM]; }; struct kvm { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 42c1eb73..bd31096 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -474,6 +474,8 @@ static struct kvm *kvm_create_vm(unsigned long type) INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); #endif + BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); + r = -ENOMEM; kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!kvm->memslots) -- cgit v0.10.2 From 0f888f5acd0cd806d4fd9f4067276b3855a13309 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 10 Dec 2012 10:33:38 -0700 Subject: KVM: Increase user memory slots on x86 to 125 With the 3 private slots, this gives us a nice round 128 slots total. The primary motivation for this is to support more assigned devices. Each assigned device can theoretically use up to 8 slots (6 MMIO BARs, 1 ROM BAR, 1 spare for a split MSI-X table mapping) though it's far more typical for a device to use 3-4 slots. If we assume a typical VM uses a dozen slots for non-assigned devices purposes, we should always be able to support 14 worst case assigned devices or 28 to 37 typical devices. Reviewed-by: Gleb Natapov Signed-off-by: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 51d5210..c431b33 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -33,7 +33,7 @@ #define KVM_MAX_VCPUS 254 #define KVM_SOFT_MAX_VCPUS 160 -#define KVM_USER_MEM_SLOTS 32 +#define KVM_USER_MEM_SLOTS 125 /* memory slots that are not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 3 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) -- cgit v0.10.2 From e11ae1a102b46f76441e328a2743ae5d6e201423 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Fri, 14 Dec 2012 15:23:16 +0200 Subject: KVM: remove unused variable. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index ebd98d0..b111aee 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -43,8 +43,6 @@ EXPORT_SYMBOL(kvm_cpu_has_pending_timer); */ int kvm_cpu_has_interrupt(struct kvm_vcpu *v) { - struct kvm_pic *s; - if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.pending; @@ -60,8 +58,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); */ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) { - struct kvm_pic *s; - if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.nr; -- cgit v0.10.2 From d4b06c2d4cce466e2d62163c0a954e1b2ce96f8b Mon Sep 17 00:00:00 2001 From: Nickolai Zeldovich Date: Sat, 15 Dec 2012 06:34:37 -0500 Subject: kvm: fix i8254 counter 0 wraparound The kvm i8254 emulation for counter 0 (but not for counters 1 and 2) has at least two bugs in mode 0: 1. The OUT bit, computed by pit_get_out(), is never set high. 2. 
The counter value, computed by pit_get_count(), wraps back around to the initial counter value, rather than wrapping back to 0xFFFF (which is the behavior described in the comment in __kpit_elapsed, the behavior implemented by qemu, and the behavior observed on AMD hardware). The bug stems from __kpit_elapsed computing the elapsed time mod the initial counter value (stored as nanoseconds in ps->period). This is both unnecessary (none of the callers of kpit_elapsed expect the value to be at most the initial counter value) and incorrect (it causes pit_get_count to appear to wrap around to the initial counter value rather than 0xFFFF). Removing this mod from __kpit_elapsed fixes both of the above bugs. Signed-off-by: Nickolai Zeldovich Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 11300d2..c1d30b2 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -122,7 +122,6 @@ static s64 __kpit_elapsed(struct kvm *kvm) */ remaining = hrtimer_get_remaining(&ps->timer); elapsed = ps->period - ktime_to_ns(remaining); - elapsed = mod_64(elapsed, ps->period); return elapsed; } -- cgit v0.10.2 From 55c171a6d90dc0574021f9c836127cfd1a7d2e30 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 14 Dec 2012 17:02:16 +0100 Subject: KVM: s390: Handle hosts not supporting s390-virtio. Running under a kvm host does not necessarily imply the presence of a page mapped above the main memory with the virtio information; however, the code includes a hard coded access to that page. Instead, check for the presence of the page and exit gracefully before we hit an addressing exception if it does not exist. Reviewed-by: Marcelo Tosatti Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck cc: stable@vger.kernel.org Signed-off-by: Gleb Natapov diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 7dabef6..b846b6c 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -422,6 +422,26 @@ static void kvm_extint_handler(struct ext_code ext_code, } /* + * For s390-virtio, we expect a page above main storage containing + * the virtio configuration. Try to actually load from this area + * in order to figure out if the host provides this page. 
+ */ +static int __init test_devices_support(unsigned long addr) +{ + int ret = -EIO; + + asm volatile( + "0: lura 0,%1\n" + "1: xgr %0,%0\n" + "2:\n" + EX_TABLE(0b,2b) + EX_TABLE(1b,2b) + : "+d" (ret) + : "a" (addr) + : "0", "cc"); + return ret; +} +/* * Init function for virtio * devices are in a single page above top of "normal" mem */ @@ -432,21 +452,23 @@ static int __init kvm_devices_init(void) if (!MACHINE_IS_KVM) return -ENODEV; + if (test_devices_support(real_memory_size) < 0) + return -ENODEV; + + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); + if (rc) + return rc; + + kvm_devices = (void *) real_memory_size; + kvm_root = root_device_register("kvm_s390"); if (IS_ERR(kvm_root)) { rc = PTR_ERR(kvm_root); printk(KERN_ERR "Could not register kvm_s390 root device"); + vmem_remove_mapping(real_memory_size, PAGE_SIZE); return rc; } - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); - if (rc) { - root_device_unregister(kvm_root); - return rc; - } - - kvm_devices = (void *) real_memory_size; - INIT_WORK(&hotplug_work, hotplug_devices); service_subclass_irq_register(); -- cgit v0.10.2 From 0abbe448eddb2263db3fb776757f480b34accd88 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 14 Dec 2012 17:02:17 +0100 Subject: s390/ccwdev: Include asm/schid.h. Get the definition of struct subchannel_id. Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 6d1f357..e606161 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -12,15 +12,13 @@ #include #include #include +#include /* structs from asm/cio.h */ struct irb; struct ccw1; struct ccw_dev_id; -/* from asm/schid.h */ -struct subchannel_id; - /* simplified initializers for struct ccw_device: * CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one * entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */ -- cgit v0.10.2 From 7e64e0597fd67c975bfa3e76401bfbcdd5ae0ff9 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 14 Dec 2012 17:02:18 +0100 Subject: KVM: s390: Add a channel I/O based virtio transport driver. Add a driver for kvm guests that matches virtual ccw devices provided by the host as virtio bridge devices. These virtio-ccw devices use a special set of channel commands in order to perform virtio functions. 
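As a quick map of that command set (the CCW_CMD_* opcodes defined in the driver below):

#define CCW_CMD_SET_VQ		0x13	/* register a virtqueue's address, size and index */
#define CCW_CMD_VDEV_RESET	0x33	/* reset the device */
#define CCW_CMD_SET_IND		0x43	/* register the address of the queue indicator bits */
#define CCW_CMD_SET_CONF_IND	0x53	/* register the address of the config-change indicator */
#define CCW_CMD_READ_FEAT	0x12	/* read host feature bits */
#define CCW_CMD_WRITE_FEAT	0x11	/* write guest feature bits */
#define CCW_CMD_READ_CONF	0x22	/* read the device config space */
#define CCW_CMD_WRITE_CONF	0x21	/* write the device config space */
#define CCW_CMD_WRITE_STATUS	0x31	/* update the device status byte */
#define CCW_CMD_READ_VQ_CONF	0x32	/* query a virtqueue's size */

Queue notifications do not go through a channel program at all: the guest issues a diagnose 0x500 hypercall with KVM_S390_VIRTIO_CCW_NOTIFY (3), the subchannel id and the queue index loaded into registers 1-3 (see do_kvm_notify() below).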
Reviewed-by: Marcelo Tosatti Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index e6972f8..aa6d0d7 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -35,6 +35,7 @@ enum interruption_class { IOINT_CSC, IOINT_PCI, IOINT_MSI, + IOINT_VIR, NMI_NMI, NR_IRQS, }; diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index bf24293..a9806ea 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -60,6 +60,7 @@ static const struct irq_class intrclass_names[] = { [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" }, [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" }, + [IOINT_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"}, [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"}, }; diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile index 0815690..241891a 100644 --- a/drivers/s390/kvm/Makefile +++ b/drivers/s390/kvm/Makefile @@ -6,4 +6,4 @@ # it under the terms of the GNU General Public License (version 2 only) # as published by the Free Software Foundation. -obj-$(CONFIG_S390_GUEST) += kvm_virtio.o +obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c new file mode 100644 index 0000000..1a5aff3 --- /dev/null +++ b/drivers/s390/kvm/virtio_ccw.c @@ -0,0 +1,853 @@ +/* + * ccw based virtio transport + * + * Copyright IBM Corp. 2012 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
+ * + * Author(s): Cornelia Huck + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * virtio related functions + */ + +struct vq_config_block { + __u16 index; + __u16 num; +} __packed; + +#define VIRTIO_CCW_CONFIG_SIZE 0x100 +/* same as PCI config space size, should be enough for all drivers */ + +struct virtio_ccw_device { + struct virtio_device vdev; + __u8 status; + __u8 config[VIRTIO_CCW_CONFIG_SIZE]; + struct ccw_device *cdev; + struct ccw1 *ccw; + __u32 area; + __u32 curr_io; + int err; + wait_queue_head_t wait_q; + spinlock_t lock; + struct list_head virtqueues; + unsigned long indicators; + unsigned long indicators2; + struct vq_config_block *config_block; +}; + +struct vq_info_block { + __u64 queue; + __u32 align; + __u16 index; + __u16 num; +} __packed; + +struct virtio_feature_desc { + __u32 features; + __u8 index; +} __packed; + +struct virtio_ccw_vq_info { + struct virtqueue *vq; + int num; + void *queue; + struct vq_info_block *info_block; + struct list_head node; +}; + +#define KVM_VIRTIO_CCW_RING_ALIGN 4096 + +#define KVM_S390_VIRTIO_CCW_NOTIFY 3 + +#define CCW_CMD_SET_VQ 0x13 +#define CCW_CMD_VDEV_RESET 0x33 +#define CCW_CMD_SET_IND 0x43 +#define CCW_CMD_SET_CONF_IND 0x53 +#define CCW_CMD_READ_FEAT 0x12 +#define CCW_CMD_WRITE_FEAT 0x11 +#define CCW_CMD_READ_CONF 0x22 +#define CCW_CMD_WRITE_CONF 0x21 +#define CCW_CMD_WRITE_STATUS 0x31 +#define CCW_CMD_READ_VQ_CONF 0x32 + +#define VIRTIO_CCW_DOING_SET_VQ 0x00010000 +#define VIRTIO_CCW_DOING_RESET 0x00040000 +#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000 +#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000 +#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000 +#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000 +#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000 +#define VIRTIO_CCW_DOING_SET_IND 0x01000000 +#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000 +#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 +#define VIRTIO_CCW_INTPARM_MASK 0xffff0000 + +static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) +{ + return container_of(vdev, struct virtio_ccw_device, vdev); +} + +static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag) +{ + unsigned long flags; + __u32 ret; + + spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); + if (vcdev->err) + ret = 0; + else + ret = vcdev->curr_io & flag; + spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); + return ret; +} + +static int ccw_io_helper(struct virtio_ccw_device *vcdev, __u32 intparm) +{ + int ret; + unsigned long flags; + int flag = intparm & VIRTIO_CCW_INTPARM_MASK; + + spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); + ret = ccw_device_start(vcdev->cdev, vcdev->ccw, intparm, 0, 0); + if (!ret) + vcdev->curr_io |= flag; + spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); + wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); + return ret ? 
ret : vcdev->err; +} + +static inline long do_kvm_notify(struct subchannel_id schid, + unsigned long queue_index) +{ + register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; + register struct subchannel_id __schid asm("2") = schid; + register unsigned long __index asm("3") = queue_index; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index) + : "memory", "cc"); + return __rc; +} + +static void virtio_ccw_kvm_notify(struct virtqueue *vq) +{ + struct virtio_ccw_vq_info *info = vq->priv; + struct virtio_ccw_device *vcdev; + struct subchannel_id schid; + + vcdev = to_vc_device(info->vq->vdev); + ccw_device_get_schid(vcdev->cdev, &schid); + do_kvm_notify(schid, virtqueue_get_queue_index(vq)); +} + +static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, int index) +{ + vcdev->config_block->index = index; + vcdev->ccw->cmd_code = CCW_CMD_READ_VQ_CONF; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(struct vq_config_block); + vcdev->ccw->cda = (__u32)(unsigned long)(vcdev->config_block); + ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_VQ_CONF); + return vcdev->config_block->num; +} + +static void virtio_ccw_del_vq(struct virtqueue *vq) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); + struct virtio_ccw_vq_info *info = vq->priv; + unsigned long flags; + unsigned long size; + int ret; + unsigned int index = virtqueue_get_queue_index(vq); + + /* Remove from our list. */ + spin_lock_irqsave(&vcdev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vcdev->lock, flags); + + /* Release from host. */ + info->info_block->queue = 0; + info->info_block->align = 0; + info->info_block->index = index; + info->info_block->num = 0; + vcdev->ccw->cmd_code = CCW_CMD_SET_VQ; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(*info->info_block); + vcdev->ccw->cda = (__u32)(unsigned long)(info->info_block); + ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_VQ | index); + /* + * -ENODEV isn't considered an error: The device is gone anyway. + * This may happen on device detach. + */ + if (ret && (ret != -ENODEV)) + dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", + ret, index); + + vring_del_virtqueue(vq); + size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); + free_pages_exact(info->queue, size); + kfree(info->info_block); + kfree(info); +} + +static void virtio_ccw_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + virtio_ccw_del_vq(vq); +} + +static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, + int i, vq_callback_t *callback, + const char *name) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + int err; + struct virtqueue *vq; + struct virtio_ccw_vq_info *info; + unsigned long size; + unsigned long flags; + + /* Allocate queue. 
*/ + info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL); + if (!info) { + dev_warn(&vcdev->cdev->dev, "no info\n"); + err = -ENOMEM; + goto out_err; + } + info->info_block = kzalloc(sizeof(*info->info_block), + GFP_DMA | GFP_KERNEL); + if (!info->info_block) { + dev_warn(&vcdev->cdev->dev, "no info block\n"); + err = -ENOMEM; + goto out_err; + } + info->num = virtio_ccw_read_vq_conf(vcdev, i); + size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); + info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); + if (info->queue == NULL) { + dev_warn(&vcdev->cdev->dev, "no queue\n"); + err = -ENOMEM; + goto out_err; + } + + vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, + true, info->queue, virtio_ccw_kvm_notify, + callback, name); + if (!vq) { + /* For now, we fail if we can't get the requested size. */ + dev_warn(&vcdev->cdev->dev, "no vq\n"); + err = -ENOMEM; + free_pages_exact(info->queue, size); + goto out_err; + } + info->vq = vq; + vq->priv = info; + + /* Register it with the host. */ + info->info_block->queue = (__u64)info->queue; + info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN; + info->info_block->index = i; + info->info_block->num = info->num; + vcdev->ccw->cmd_code = CCW_CMD_SET_VQ; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(*info->info_block); + vcdev->ccw->cda = (__u32)(unsigned long)(info->info_block); + err = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_VQ | i); + if (err) { + dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); + free_pages_exact(info->queue, size); + info->vq = NULL; + vq->priv = NULL; + goto out_err; + } + + /* Save it to our list. */ + spin_lock_irqsave(&vcdev->lock, flags); + list_add(&info->node, &vcdev->virtqueues); + spin_unlock_irqrestore(&vcdev->lock, flags); + + return vq; + +out_err: + if (info) + kfree(info->info_block); + kfree(info); + return ERR_PTR(err); +} + +static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + unsigned long *indicatorp = NULL; + int ret, i; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) { + ret = PTR_ERR(vqs[i]); + vqs[i] = NULL; + goto out; + } + } + ret = -ENOMEM; + /* We need a data area under 2G to communicate. */ + indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL); + if (!indicatorp) + goto out; + *indicatorp = (unsigned long) &vcdev->indicators; + /* Register queue indicators with host. */ + vcdev->indicators = 0; + vcdev->ccw->cmd_code = CCW_CMD_SET_IND; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(vcdev->indicators); + vcdev->ccw->cda = (__u32)(unsigned long) indicatorp; + ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_IND); + if (ret) + goto out; + /* Register indicators2 with host for config changes */ + *indicatorp = (unsigned long) &vcdev->indicators2; + vcdev->indicators2 = 0; + vcdev->ccw->cmd_code = CCW_CMD_SET_CONF_IND; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(vcdev->indicators2); + vcdev->ccw->cda = (__u32)(unsigned long) indicatorp; + ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_CONF_IND); + if (ret) + goto out; + + kfree(indicatorp); + return 0; +out: + kfree(indicatorp); + virtio_ccw_del_vqs(vdev); + return ret; +} + +static void virtio_ccw_reset(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + /* Zero status bits. 
*/ + vcdev->status = 0; + + /* Send a reset ccw on device. */ + vcdev->ccw->cmd_code = CCW_CMD_VDEV_RESET; + vcdev->ccw->flags = 0; + vcdev->ccw->count = 0; + vcdev->ccw->cda = 0; + ccw_io_helper(vcdev, VIRTIO_CCW_DOING_RESET); +} + +static u32 virtio_ccw_get_features(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct virtio_feature_desc features; + int ret; + + /* Read the feature bits from the host. */ + /* TODO: Features > 32 bits */ + features.index = 0; + vcdev->ccw->cmd_code = CCW_CMD_READ_FEAT; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(features); + vcdev->ccw->cda = vcdev->area; + ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_FEAT); + if (ret) + return 0; + + memcpy(&features, (void *)(unsigned long)vcdev->area, + sizeof(features)); + return le32_to_cpu(features.features); +} + +static void virtio_ccw_finalize_features(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct virtio_feature_desc features; + int i; + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + for (i = 0; i < sizeof(*vdev->features) / sizeof(features.features); + i++) { + int highbits = i % 2 ? 32 : 0; + features.index = i; + features.features = cpu_to_le32(vdev->features[i / 2] + >> highbits); + memcpy((void *)(unsigned long)vcdev->area, &features, + sizeof(features)); + /* Write the feature bits to the host. */ + vcdev->ccw->cmd_code = CCW_CMD_WRITE_FEAT; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(features); + vcdev->ccw->cda = vcdev->area; + ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_FEAT); + } +} + +static void virtio_ccw_get_config(struct virtio_device *vdev, + unsigned int offset, void *buf, unsigned len) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + int ret; + + /* Read the config area from the host. */ + vcdev->ccw->cmd_code = CCW_CMD_READ_CONF; + vcdev->ccw->flags = 0; + vcdev->ccw->count = offset + len; + vcdev->ccw->cda = vcdev->area; + ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_CONFIG); + if (ret) + return; + + memcpy(vcdev->config, (void *)(unsigned long)vcdev->area, + sizeof(vcdev->config)); + memcpy(buf, &vcdev->config[offset], len); +} + +static void virtio_ccw_set_config(struct virtio_device *vdev, + unsigned int offset, const void *buf, + unsigned len) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + memcpy(&vcdev->config[offset], buf, len); + /* Write the config area to the host. */ + memcpy((void *)(unsigned long)vcdev->area, vcdev->config, + sizeof(vcdev->config)); + vcdev->ccw->cmd_code = CCW_CMD_WRITE_CONF; + vcdev->ccw->flags = 0; + vcdev->ccw->count = offset + len; + vcdev->ccw->cda = vcdev->area; + ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_CONFIG); +} + +static u8 virtio_ccw_get_status(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + return vcdev->status; +} + +static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + /* Write the status to the host. 
*/ + vcdev->status = status; + memcpy((void *)(unsigned long)vcdev->area, &status, sizeof(status)); + vcdev->ccw->cmd_code = CCW_CMD_WRITE_STATUS; + vcdev->ccw->flags = 0; + vcdev->ccw->count = sizeof(status); + vcdev->ccw->cda = vcdev->area; + ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_STATUS); +} + +static struct virtio_config_ops virtio_ccw_config_ops = { + .get_features = virtio_ccw_get_features, + .finalize_features = virtio_ccw_finalize_features, + .get = virtio_ccw_get_config, + .set = virtio_ccw_set_config, + .get_status = virtio_ccw_get_status, + .set_status = virtio_ccw_set_status, + .reset = virtio_ccw_reset, + .find_vqs = virtio_ccw_find_vqs, + .del_vqs = virtio_ccw_del_vqs, +}; + + +/* + * ccw bus driver related functions + */ + +static void virtio_ccw_release_dev(struct device *_d) +{ + struct virtio_device *dev = container_of(_d, struct virtio_device, + dev); + struct virtio_ccw_device *vcdev = to_vc_device(dev); + + kfree((void *)(unsigned long)vcdev->area); + kfree(vcdev->config_block); + kfree(vcdev->ccw); + kfree(vcdev); +} + +static int irb_is_error(struct irb *irb) +{ + if (scsw_cstat(&irb->scsw) != 0) + return 1; + if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) + return 1; + if (scsw_cc(&irb->scsw) != 0) + return 1; + return 0; +} + +static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev, + int index) +{ + struct virtio_ccw_vq_info *info; + unsigned long flags; + struct virtqueue *vq; + + vq = NULL; + spin_lock_irqsave(&vcdev->lock, flags); + list_for_each_entry(info, &vcdev->virtqueues, node) { + if (virtqueue_get_queue_index(info->vq) == index) { + vq = info->vq; + break; + } + } + spin_unlock_irqrestore(&vcdev->lock, flags); + return vq; +} + +static void virtio_ccw_int_handler(struct ccw_device *cdev, + unsigned long intparm, + struct irb *irb) +{ + __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK; + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + int i; + struct virtqueue *vq; + struct virtio_driver *drv; + + /* Check if it's a notification from the host. */ + if ((intparm == 0) && + (scsw_stctl(&irb->scsw) == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { + /* OK */ + } + if (irb_is_error(irb)) + vcdev->err = -EIO; /* XXX - use real error */ + if (vcdev->curr_io & activity) { + switch (activity) { + case VIRTIO_CCW_DOING_READ_FEAT: + case VIRTIO_CCW_DOING_WRITE_FEAT: + case VIRTIO_CCW_DOING_READ_CONFIG: + case VIRTIO_CCW_DOING_WRITE_CONFIG: + case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_SET_VQ: + case VIRTIO_CCW_DOING_SET_IND: + case VIRTIO_CCW_DOING_SET_CONF_IND: + case VIRTIO_CCW_DOING_RESET: + case VIRTIO_CCW_DOING_READ_VQ_CONF: + vcdev->curr_io &= ~activity; + wake_up(&vcdev->wait_q); + break; + default: + /* don't know what to do... */ + dev_warn(&cdev->dev, "Suspicious activity '%08x'\n", + activity); + WARN_ON(1); + break; + } + } + for_each_set_bit(i, &vcdev->indicators, + sizeof(vcdev->indicators) * BITS_PER_BYTE) { + /* The bit clear must happen before the vring kick. */ + clear_bit(i, &vcdev->indicators); + barrier(); + vq = virtio_ccw_vq_by_ind(vcdev, i); + vring_interrupt(0, vq); + } + if (test_bit(0, &vcdev->indicators2)) { + drv = container_of(vcdev->vdev.dev.driver, + struct virtio_driver, driver); + + if (drv && drv->config_changed) + drv->config_changed(&vcdev->vdev); + clear_bit(0, &vcdev->indicators2); + } +} + +/* + * We usually want to autoonline all devices, but give the admin + * a way to exempt devices from this. 
+ */ +#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ + (8*sizeof(long))) +static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS]; + +static char *no_auto = ""; + +module_param(no_auto, charp, 0444); +MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined"); + +static int virtio_ccw_check_autoonline(struct ccw_device *cdev) +{ + struct ccw_dev_id id; + + ccw_device_get_id(cdev, &id); + if (test_bit(id.devno, devs_no_auto[id.ssid])) + return 0; + return 1; +} + +static void virtio_ccw_auto_online(void *data, async_cookie_t cookie) +{ + struct ccw_device *cdev = data; + int ret; + + ret = ccw_device_set_online(cdev); + if (ret) + dev_warn(&cdev->dev, "Failed to set online: %d\n", ret); +} + +static int virtio_ccw_probe(struct ccw_device *cdev) +{ + cdev->handler = virtio_ccw_int_handler; + + if (virtio_ccw_check_autoonline(cdev)) + async_schedule(virtio_ccw_auto_online, cdev); + return 0; +} + +static void virtio_ccw_remove(struct ccw_device *cdev) +{ + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + + if (cdev->online) { + unregister_virtio_device(&vcdev->vdev); + dev_set_drvdata(&cdev->dev, NULL); + } + cdev->handler = NULL; +} + +static int virtio_ccw_offline(struct ccw_device *cdev) +{ + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + + unregister_virtio_device(&vcdev->vdev); + dev_set_drvdata(&cdev->dev, NULL); + return 0; +} + + +/* Area needs to be big enough to fit status, features or configuration. */ +#define VIRTIO_AREA_SIZE VIRTIO_CCW_CONFIG_SIZE /* biggest possible use */ + +static int virtio_ccw_online(struct ccw_device *cdev) +{ + int ret; + struct virtio_ccw_device *vcdev; + + vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL); + if (!vcdev) { + dev_warn(&cdev->dev, "Could not get memory for virtio\n"); + ret = -ENOMEM; + goto out_free; + } + vcdev->area = (__u32)(unsigned long)kzalloc(VIRTIO_AREA_SIZE, + GFP_DMA | GFP_KERNEL); + if (!vcdev->area) { + dev_warn(&cdev->dev, "Cound not get memory for virtio\n"); + ret = -ENOMEM; + goto out_free; + } + vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), + GFP_DMA | GFP_KERNEL); + if (!vcdev->config_block) { + ret = -ENOMEM; + goto out_free; + } + vcdev->ccw = kzalloc(sizeof(*vcdev->ccw), GFP_DMA | GFP_KERNEL); + if (!vcdev->ccw) { + ret = -ENOMEM; + goto out_free; + } + + vcdev->vdev.dev.parent = &cdev->dev; + vcdev->vdev.dev.release = virtio_ccw_release_dev; + vcdev->vdev.config = &virtio_ccw_config_ops; + vcdev->cdev = cdev; + init_waitqueue_head(&vcdev->wait_q); + INIT_LIST_HEAD(&vcdev->virtqueues); + spin_lock_init(&vcdev->lock); + + dev_set_drvdata(&cdev->dev, vcdev); + vcdev->vdev.id.vendor = cdev->id.cu_type; + vcdev->vdev.id.device = cdev->id.cu_model; + ret = register_virtio_device(&vcdev->vdev); + if (ret) { + dev_warn(&cdev->dev, "Failed to register virtio device: %d\n", + ret); + goto out_put; + } + return 0; +out_put: + dev_set_drvdata(&cdev->dev, NULL); + put_device(&vcdev->vdev.dev); + return ret; +out_free: + if (vcdev) { + kfree((void *)(unsigned long)vcdev->area); + kfree(vcdev->config_block); + kfree(vcdev->ccw); + } + kfree(vcdev); + return ret; +} + +static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) +{ + /* TODO: Check whether we need special handling here. 
*/ + return 0; +} + +static struct ccw_device_id virtio_ids[] = { + { CCW_DEVICE(0x3832, 0) }, + {}, +}; +MODULE_DEVICE_TABLE(ccw, virtio_ids); + +static struct ccw_driver virtio_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "virtio_ccw", + }, + .ids = virtio_ids, + .probe = virtio_ccw_probe, + .remove = virtio_ccw_remove, + .set_offline = virtio_ccw_offline, + .set_online = virtio_ccw_online, + .notify = virtio_ccw_cio_notify, + .int_class = IOINT_VIR, +}; + +static int __init pure_hex(char **cp, unsigned int *val, int min_digit, + int max_digit, int max_val) +{ + int diff; + + diff = 0; + *val = 0; + + while (diff <= max_digit) { + int value = hex_to_bin(**cp); + + if (value < 0) + break; + *val = *val * 16 + value; + (*cp)++; + diff++; + } + + if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) + return 1; + + return 0; +} + +static int __init parse_busid(char *str, unsigned int *cssid, + unsigned int *ssid, unsigned int *devno) +{ + char *str_work; + int rc, ret; + + rc = 1; + + if (*str == '\0') + goto out; + + str_work = str; + ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); + if (ret || (str_work[0] != '.')) + goto out; + str_work++; + ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); + if (ret || (str_work[0] != '.')) + goto out; + str_work++; + ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); + if (ret || (str_work[0] != '\0')) + goto out; + + rc = 0; +out: + return rc; +} + +static void __init no_auto_parse(void) +{ + unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; + char *parm, *str; + int rc; + + str = no_auto; + while ((parm = strsep(&str, ","))) { + rc = parse_busid(strsep(&parm, "-"), &from_cssid, + &from_ssid, &from); + if (rc) + continue; + if (parm != NULL) { + rc = parse_busid(parm, &to_cssid, + &to_ssid, &to); + if ((from_ssid > to_ssid) || + ((from_ssid == to_ssid) && (from > to))) + rc = -EINVAL; + } else { + to_cssid = from_cssid; + to_ssid = from_ssid; + to = from; + } + if (rc) + continue; + while ((from_ssid < to_ssid) || + ((from_ssid == to_ssid) && (from <= to))) { + set_bit(from, devs_no_auto[from_ssid]); + from++; + if (from > __MAX_SUBCHANNEL) { + from_ssid++; + from = 0; + } + } + } +} + +static int __init virtio_ccw_init(void) +{ + /* parse no_auto string before we do anything further */ + no_auto_parse(); + return ccw_driver_register(&virtio_ccw_driver); +} +module_init(virtio_ccw_init); + +static void __exit virtio_ccw_exit(void) +{ + ccw_driver_unregister(&virtio_ccw_driver); +} +module_exit(virtio_ccw_exit); -- cgit v0.10.2 From 871a069db9f29938071b3d214cb964980071ffbf Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Wed, 12 Dec 2012 13:05:12 +0800 Subject: KVM: remove a wrong hack of delivery PIT intr to vcpu0 This hack is wrong. The pin number of PIT is connected to 2 not 0. This means this hack never takes effect. So it is ok to remove it. Signed-off-by: Yang Zhang Signed-off-by: Gleb Natapov diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index cfb7e4d..f3abbef 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -179,15 +179,6 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) irqe.level = 1; irqe.shorthand = 0; -#ifdef CONFIG_X86 - /* Always delivery PIT interrupt to vcpu 0 */ - if (irq == 0) { - irqe.dest_mode = 0; /* Physical mode. 
*/ - /* need to read apic_id from apic regiest since - * it can be rewritten */ - irqe.dest_id = ioapic->kvm->bsp_vcpu_id; - } -#endif return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); } -- cgit v0.10.2 From 116c14c0191f3378e6567af296529ac287e85aa2 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 21 Dec 2012 08:20:16 -0700 Subject: kvm: Fix memory slot generation updates Previous patch "kvm: Minor memory slot optimization" (b7f69c555ca43) overlooked the generation field of the memory slots. Re-using the original memory slots left us with with two slightly different memory slots with the same generation. To fix this, make update_memslots() take a new parameter to specify the last generation. This also makes generation management more explicit to avoid such problems in the future. Reported-by: Takuya Yoshikawa Signed-off-by: Alex Williamson Signed-off-by: Gleb Natapov diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 32fdc45..cbe0d68 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -428,7 +428,8 @@ void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); -void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new); +void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new, + u64 last_generation); static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index bd31096..14cbae8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -672,7 +672,8 @@ static void sort_memslots(struct kvm_memslots *slots) slots->id_to_index[slots->memslots[i].id] = i; } -void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new) +void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new, + u64 last_generation) { if (new) { int id = new->id; @@ -684,7 +685,7 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new) sort_memslots(slots); } - slots->generation++; + slots->generation = last_generation + 1; } static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) @@ -819,7 +820,7 @@ int __kvm_set_memory_region(struct kvm *kvm, slot = id_to_memslot(slots, mem->slot); slot->flags |= KVM_MEMSLOT_INVALID; - update_memslots(slots, NULL); + update_memslots(slots, NULL, kvm->memslots->generation); old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); @@ -867,7 +868,7 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(&new.arch, 0, sizeof(new.arch)); } - update_memslots(slots, &new); + update_memslots(slots, &new, kvm->memslots->generation); old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); -- cgit v0.10.2 From 07f42f5f25dc214a33214159fc8b62b984b713eb Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:49 +0200 Subject: KVM: VMX: cleanup rmode_segment_valid() Set segment fields explicitly instead of using binary operations. No behaviour changes. 
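For context on the check being simplified here: a segment can back vm86 execution only if its base equals selector << 4, its limit covers 0xffff and its access rights collapse to 0xf3 (a present, DPL-3, read/write data segment). The stand-alone sketch below approximates that predicate; the toy_* names and the simplified access-rights packing are invented for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the fields rmode_segment_valid() looks at. */
struct toy_segment {
        uint32_t base;
        uint32_t limit;
        uint16_t selector;
        uint8_t  type, dpl;
        bool     s, present, g, db;
};

/* Pack a subset of the VMX access-rights layout (assumed, simplified). */
static uint32_t toy_access_rights(const struct toy_segment *seg)
{
        return seg->type | (seg->s << 4) | (seg->dpl << 5) |
               (seg->present << 7) | (seg->db << 14) | (seg->g << 15);
}

static bool toy_rmode_segment_valid(struct toy_segment seg)
{
        seg.dpl = 3;            /* vm86 always runs at CPL 3 */
        seg.g = false;
        seg.db = false;
        if (seg.base != (uint32_t)seg.selector << 4)
                return false;
        if (seg.limit < 0xffff)
                return false;
        return toy_access_rights(&seg) == 0xf3;
}

int main(void)
{
        struct toy_segment cs = {
                .base = 0xf0000, .limit = 0xffff, .selector = 0xf000,
                .type = 0x3, .dpl = 3, .s = true, .present = true,
        };

        printf("valid=%d\n", toy_rmode_segment_valid(cs));
        return 0;
}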
Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b3101e3..265fdd3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3380,13 +3380,16 @@ static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) u32 ar; vmx_get_segment(vcpu, &var, seg); + var.dpl = 0x3; + var.g = 0; + var.db = 0; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; if (var.limit < 0xffff) return false; - if (((ar | (3 << AR_DPL_SHIFT)) & ~(AR_G_MASK | AR_DB_MASK)) != 0xf3) + if (ar != 0xf3) return false; return true; -- cgit v0.10.2 From 0647f4aa8c58a7e5adb873b485c83e0c93d9c6d1 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:50 +0200 Subject: KVM: VMX: relax check for CS register in rmode_segment_valid() rmode_segment_valid() checks if segment descriptor can be used to enter vm86 mode. VMX spec mandates that in vm86 mode CS register will be of type data, not code. Lets allow guest entry with vm86 mode if the only problem with CS register is incorrect type. Otherwise entire real mode will be emulated. Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 265fdd3..7c2c054 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3383,6 +3383,8 @@ static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) var.dpl = 0x3; var.g = 0; var.db = 0; + if (seg == VCPU_SREG_CS) + var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) -- cgit v0.10.2 From c6ad115348035f474d6fb81005bb4764bdd128ef Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:51 +0200 Subject: KVM: VMX: return correct segment limit and flags for CS/SS registers in real mode VMX without unrestricted mode cannot virtualize real mode, so if emulate_invalid_guest_state=0 kvm uses vm86 mode to approximate it. Sometimes, when guest moves from protected mode to real mode, it leaves segment descriptors in a state not suitable for use by vm86 mode virtualization, so we keep shadow copy of segment descriptors for internal use and load fake register to VMCS for guest entry to succeed. Till now we kept shadow for all segments except SS and CS (for SS and CS we returned parameters directly from VMCS), but since commit a5625189f6810 emulator enforces segment limits in real mode. This causes #GP during move from protected mode to real mode when emulator fetches first instruction after moving to real mode since it uses incorrect CS base and limit to linearize the %rip. Fix by keeping shadow for SS and CS too. 
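To make the failure mode concrete: the emulator forms the linear address of an instruction fetch as CS.base + %rip and refuses offsets beyond CS.limit, so if the CS values it reads back are the vm86-mangled VMCS contents rather than what the guest actually loaded, the very first fetch after the mode switch can fault. A minimal stand-alone sketch of that step follows; toy_linearize() and the sample base/limit values are illustrative, not the emulator's code.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: the fetch path roughly checks %rip against
 * CS.limit and then adds CS.base.  With a wrong base/limit pair the
 * check or the resulting linear address is wrong.
 */
static int toy_linearize(uint32_t cs_base, uint32_t cs_limit,
                         uint32_t rip, uint32_t *la)
{
        if (rip > cs_limit)
                return -1;              /* emulator would inject #GP */
        *la = cs_base + rip;
        return 0;
}

int main(void)
{
        uint32_t la;

        /* Real-mode CS 0xf000: base 0xf0000, limit 0xffff. */
        printf("correct: %d\n", toy_linearize(0xf0000, 0xffff, 0x100, &la));
        /* A limit smaller than %rip makes the first fetch fault. */
        printf("wrong:   %d\n", toy_linearize(0xf0000, 0x10, 0x100, &la));
        return 0;
}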
Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7c2c054..792d9cc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2856,6 +2856,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->emulation_required = 1; vmx->rmode.vm86_active = 1; @@ -3171,10 +3173,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx = to_vmx(vcpu); u32 ar; - if (vmx->rmode.vm86_active - && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES - || seg == VCPU_SREG_DS || seg == VCPU_SREG_FS - || seg == VCPU_SREG_GS)) { + if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { *var = vmx->rmode.segs[seg]; if (seg == VCPU_SREG_TR || var->selector == vmx_read_guest_seg_selector(vmx, seg)) -- cgit v0.10.2 From beb853ffeccbfa626f30038e816d61187103c455 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:52 +0200 Subject: KVM: VMX: use fix_rmode_seg() to fix all code/data segments The code for SS and CS does the same thing fix_rmode_seg() is doing. Use it instead of hand crafted code. Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 792d9cc..9e784c2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3315,30 +3315,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, * unrestricted guest like Westmere to older host that don't have * unrestricted guest like Nehelem. */ - if (vmx->rmode.vm86_active) { - switch (seg) { - case VCPU_SREG_CS: - vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); - vmcs_write32(GUEST_CS_LIMIT, 0xffff); - if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) - vmcs_writel(GUEST_CS_BASE, 0xf0000); - vmcs_write16(GUEST_CS_SELECTOR, - vmcs_readl(GUEST_CS_BASE) >> 4); - break; - case VCPU_SREG_ES: - case VCPU_SREG_DS: - case VCPU_SREG_GS: - case VCPU_SREG_FS: - fix_rmode_seg(seg, &vmx->rmode.segs[seg]); - break; - case VCPU_SREG_SS: - vmcs_write16(GUEST_SS_SELECTOR, - vmcs_readl(GUEST_SS_BASE) >> 4); - vmcs_write32(GUEST_SS_LIMIT, 0xffff); - vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); - break; - } - } + if (vmx->rmode.vm86_active && var->s) + fix_rmode_seg(seg, &vmx->rmode.segs[seg]); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) -- cgit v0.10.2 From 39dcfb95def4646ecdb76ea7a7491e8420dce7a7 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:53 +0200 Subject: KVM: VMX: remove redundant code from vmx_set_segment() Segment descriptor's base is fixed by call to fix_rmode_seg(). Not need to do it twice. 
Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9e784c2..9b8edd2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3268,7 +3268,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; - u32 ar; + u32 ar = 0; vmx_segment_cache_clear(vmx); @@ -3280,15 +3280,9 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); - if (vmx->rmode.vm86_active && var->s) { + if (vmx->rmode.vm86_active && var->s) vmx->rmode.segs[seg] = *var; - /* - * Hack real-mode segments into vm86 compatibility. - */ - if (var->base == 0xffff0000 && var->selector == 0xf000) - vmcs_writel(sf->base, 0xf0000); - ar = 0xf3; - } else + else ar = vmx_segment_access_rights(var); /* -- cgit v0.10.2 From 1ecd50a9474c4bfa3129d90cfcf1c1eb4fbf19e7 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:54 +0200 Subject: KVM: VMX: clean-up vmx_set_segment() Move all vm86_active logic into one place. Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9b8edd2..4bd1c22 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3271,19 +3271,21 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, u32 ar = 0; vmx_segment_cache_clear(vmx); + __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); - if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) { - vmcs_write16(sf->selector, var->selector); - vmx->rmode.segs[VCPU_SREG_TR] = *var; + if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { + vmx->rmode.segs[seg] = *var; + if (seg == VCPU_SREG_TR) + vmcs_write16(sf->selector, var->selector); + else if (var->s) + fix_rmode_seg(seg, &vmx->rmode.segs[seg]); return; } + vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); - if (vmx->rmode.vm86_active && var->s) - vmx->rmode.segs[seg] = *var; - else - ar = vmx_segment_access_rights(var); + ar = vmx_segment_access_rights(var); /* * Fix the "Accessed" bit in AR field of segment registers for older @@ -3300,17 +3302,6 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, ar |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, ar); - __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); - - /* - * Fix segments for real mode guest in hosts that don't have - * "unrestricted_mode" or it was disabled. - * This is done to allow migration of the guests from hosts with - * unrestricted guest like Westmere to older host that don't have - * unrestricted guest like Nehelem. 
- */ - if (vmx->rmode.vm86_active && var->s) - fix_rmode_seg(seg, &vmx->rmode.segs[seg]); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) -- cgit v0.10.2 From f924d66d278d5da890f3098805b0450a4ef66c32 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 12 Dec 2012 19:10:55 +0200 Subject: KVM: VMX: remove unneeded temporary variable from vmx_set_segment() Reviewed-by: Marcelo Tosatti Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4bd1c22..23d5aec 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3268,7 +3268,6 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; - u32 ar = 0; vmx_segment_cache_clear(vmx); __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); @@ -3285,7 +3284,6 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); - ar = vmx_segment_access_rights(var); /* * Fix the "Accessed" bit in AR field of segment registers for older @@ -3299,9 +3297,9 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, * kvm hack. */ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) - ar |= 0x1; /* Accessed */ + var->type |= 0x1; /* Accessed */ - vmcs_write32(sf->ar_bytes, ar); + vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) -- cgit v0.10.2 From 7ec4fb44962f611241bcb974fa97c5f49ddcb2f1 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Dec 2012 17:49:30 +0200 Subject: KVM: move the code that installs new slots array to a separate function. Move repetitive code sequence to a separate function. Reviewed-by: Alex Williamson Signed-off-by: Gleb Natapov diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 14cbae8..e45c20c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -702,6 +702,17 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) return 0; } +static struct kvm_memslots *install_new_memslots(struct kvm *kvm, + struct kvm_memslots *slots, struct kvm_memory_slot *new) +{ + struct kvm_memslots *old_memslots = kvm->memslots; + + update_memslots(slots, new, kvm->memslots->generation); + rcu_assign_pointer(kvm->memslots, slots); + synchronize_srcu_expedited(&kvm->srcu); + return old_memslots; +} + /* * Allocate some memory and give it an address in the guest physical address * space. 
@@ -820,11 +831,8 @@ int __kvm_set_memory_region(struct kvm *kvm, slot = id_to_memslot(slots, mem->slot); slot->flags |= KVM_MEMSLOT_INVALID; - update_memslots(slots, NULL, kvm->memslots->generation); + old_memslots = install_new_memslots(kvm, slots, NULL); - old_memslots = kvm->memslots; - rcu_assign_pointer(kvm->memslots, slots); - synchronize_srcu_expedited(&kvm->srcu); /* slot was deleted or moved, clear iommu mapping */ kvm_iommu_unmap_pages(kvm, &old); /* From this point no new shadow pages pointing to a deleted, @@ -868,10 +876,7 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(&new.arch, 0, sizeof(new.arch)); } - update_memslots(slots, &new, kvm->memslots->generation); - old_memslots = kvm->memslots; - rcu_assign_pointer(kvm->memslots, slots); - synchronize_srcu_expedited(&kvm->srcu); + old_memslots = install_new_memslots(kvm, slots, &new); kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); -- cgit v0.10.2 From 11393a077dcfa7fb827d957f0305fc369d402a5e Mon Sep 17 00:00:00 2001 From: Jesse Larrew Date: Mon, 10 Dec 2012 15:31:51 -0600 Subject: x86: kvm_para: fix typo in hypercall comments Correct a typo in the comment explaining hypercalls. Signed-off-by: Jesse Larrew Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index eb3e9d8..f49c16d 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -122,7 +122,7 @@ static inline bool kvm_check_and_clear_guest_paused(void) * * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively. * The hypercall number should be placed in rax and the return value will be - * placed in rax. No other registers will be clobbered unless explicited + * placed in rax. No other registers will be clobbered unless explicitly * noted by the particular hypercall. */ -- cgit v0.10.2 From 3a78a4f46302bfc83602a53dfa4dcbe76a7a1f5f Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 20 Dec 2012 16:57:42 +0200 Subject: KVM: emulator: drop RPL check from linearize() function According to Intel SDM Vol3 Section 5.5 "Privilege Levels" and 5.6 "Privilege Level Checking When Accessing Data Segments" RPL checking is done during loading of a segment selector, not during data access. We already do checking during segment selector loading, so drop the check during data access. Checking RPL during data access triggers #GP if after transition from real mode to protected mode RPL bits in a segment selector are set. 
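To make the removed check concrete: the old code folded the selector's RPL into the privilege used for the data-segment DPL comparison via max(CPL, RPL); the new code uses the CPL alone, since RPL is already validated when the selector is loaded. A stand-alone sketch with invented names and a sample selector:

#include <stdio.h>

/*
 * Illustration only (not the emulator's code): effective privilege
 * used for the data-segment DPL check, before and after the patch.
 * "sel" carries the RPL in its low two bits.
 */
static unsigned int old_effective_priv(unsigned int cpl, unsigned int sel)
{
        unsigned int rpl = sel & 3;

        return cpl > rpl ? cpl : rpl;   /* max(cpl, rpl) */
}

static unsigned int new_effective_priv(unsigned int cpl, unsigned int sel)
{
        (void)sel;                      /* RPL was checked at segment load */
        return cpl;
}

int main(void)
{
        /*
         * CPL 0 guest that still has RPL 3 left in a selector after the
         * real-mode -> protected-mode transition: the old rule demands
         * DPL >= 3 and faults, the new rule only requires DPL >= CPL = 0.
         */
        printf("old=%u new=%u\n",
               old_effective_priv(0, 0x0007), new_effective_priv(0, 0x0007));
        return 0;
}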
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c7547b3..a3d31e3 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -665,7 +665,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, ulong la; u32 lim; u16 sel; - unsigned cpl, rpl; + unsigned cpl; la = seg_base(ctxt, addr.seg) + addr.ea; switch (ctxt->mode) { @@ -699,11 +699,6 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, goto bad; } cpl = ctxt->ops->cpl(ctxt); - if (ctxt->mode == X86EMUL_MODE_REAL) - rpl = 0; - else - rpl = sel & 3; - cpl = max(cpl, rpl); if (!(desc.type & 8)) { /* data segment */ if (cpl > desc.dpl) -- cgit v0.10.2 From 045a282ca41505184e8fc805335d1f5aae0c8a03 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 20 Dec 2012 16:57:43 +0200 Subject: KVM: emulator: implement fninit, fnstsw, fnstcw Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a3d31e3..53c5ad6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -115,6 +115,7 @@ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ +#define Escape (5<<15) /* Escape to coprocessor instruction */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) @@ -166,6 +167,7 @@ struct opcode { const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; + const struct escape *esc; } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; @@ -182,6 +184,11 @@ struct gprefix { struct opcode pfx_f3; }; +struct escape { + struct opcode op[8]; + struct opcode high[64]; +}; + /* EFLAGS bit definitions. 
*/ #define EFLG_ID (1<<21) #define EFLG_VIP (1<<20) @@ -991,6 +998,53 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) ctxt->ops->put_fpu(ctxt); } +static int em_fninit(struct x86_emulate_ctxt *ctxt) +{ + if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) + return emulate_nm(ctxt); + + ctxt->ops->get_fpu(ctxt); + asm volatile("fninit"); + ctxt->ops->put_fpu(ctxt); + return X86EMUL_CONTINUE; +} + +static int em_fnstcw(struct x86_emulate_ctxt *ctxt) +{ + u16 fcw; + + if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) + return emulate_nm(ctxt); + + ctxt->ops->get_fpu(ctxt); + asm volatile("fnstcw %0": "+m"(fcw)); + ctxt->ops->put_fpu(ctxt); + + /* force 2 byte destination */ + ctxt->dst.bytes = 2; + ctxt->dst.val = fcw; + + return X86EMUL_CONTINUE; +} + +static int em_fnstsw(struct x86_emulate_ctxt *ctxt) +{ + u16 fsw; + + if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) + return emulate_nm(ctxt); + + ctxt->ops->get_fpu(ctxt); + asm volatile("fnstsw %0": "+m"(fsw)); + ctxt->ops->put_fpu(ctxt); + + /* force 2 byte destination */ + ctxt->dst.bytes = 2; + ctxt->dst.val = fsw; + + return X86EMUL_CONTINUE; +} + static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { @@ -3590,6 +3644,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } +#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define II(_f, _e, _i) \ { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } @@ -3725,6 +3780,69 @@ static const struct gprefix pfx_vmovntpx = { I(0, em_mov), N, N, N, }; +static const struct escape escape_d9 = { { + N, N, N, N, N, N, N, I(DstMem, em_fnstcw), +}, { + /* 0xC0 - 0xC7 */ + N, N, N, N, N, N, N, N, + /* 0xC8 - 0xCF */ + N, N, N, N, N, N, N, N, + /* 0xD0 - 0xC7 */ + N, N, N, N, N, N, N, N, + /* 0xD8 - 0xDF */ + N, N, N, N, N, N, N, N, + /* 0xE0 - 0xE7 */ + N, N, N, N, N, N, N, N, + /* 0xE8 - 0xEF */ + N, N, N, N, N, N, N, N, + /* 0xF0 - 0xF7 */ + N, N, N, N, N, N, N, N, + /* 0xF8 - 0xFF */ + N, N, N, N, N, N, N, N, +} }; + +static const struct escape escape_db = { { + N, N, N, N, N, N, N, N, +}, { + /* 0xC0 - 0xC7 */ + N, N, N, N, N, N, N, N, + /* 0xC8 - 0xCF */ + N, N, N, N, N, N, N, N, + /* 0xD0 - 0xC7 */ + N, N, N, N, N, N, N, N, + /* 0xD8 - 0xDF */ + N, N, N, N, N, N, N, N, + /* 0xE0 - 0xE7 */ + N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, + /* 0xE8 - 0xEF */ + N, N, N, N, N, N, N, N, + /* 0xF0 - 0xF7 */ + N, N, N, N, N, N, N, N, + /* 0xF8 - 0xFF */ + N, N, N, N, N, N, N, N, +} }; + +static const struct escape escape_dd = { { + N, N, N, N, N, N, N, I(DstMem, em_fnstsw), +}, { + /* 0xC0 - 0xC7 */ + N, N, N, N, N, N, N, N, + /* 0xC8 - 0xCF */ + N, N, N, N, N, N, N, N, + /* 0xD0 - 0xC7 */ + N, N, N, N, N, N, N, N, + /* 0xD8 - 0xDF */ + N, N, N, N, N, N, N, N, + /* 0xE0 - 0xE7 */ + N, N, N, N, N, N, N, N, + /* 0xE8 - 0xEF */ + N, N, N, N, N, N, N, N, + /* 0xF0 - 0xF7 */ + N, N, N, N, N, N, N, N, + /* 0xF8 - 0xFF */ + N, N, N, N, N, N, N, N, +} }; + static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ I6ALU(Lock, em_add), @@ -3821,7 +3939,7 @@ static const struct opcode opcode_table[256] = { D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), N, I(DstAcc | SrcImmByte | No64, em_aad), N, N, 
/* 0xD8 - 0xDF */ - N, N, N, N, N, N, N, N, + N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte, em_loop)), I(SrcImmByte, em_jcxz), @@ -4246,6 +4364,12 @@ done_prefixes: case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; + case Escape: + if (ctxt->modrm > 0xbf) + opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; + else + opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; + break; default: return EMULATION_FAILED; } -- cgit v0.10.2 From 89efbed02cfd7e9ce3324de0b44a70ee1c716fac Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 20 Dec 2012 16:57:44 +0200 Subject: KVM: VMX: make rmode_segment_valid() more strict. Currently it allows entering vm86 mode if segment limit is greater than 0xffff and db bit is set. Both of those can cause incorrect execution of instruction by cpu since in vm86 mode limit will be set to 0xffff and db will be forced to 0. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 23d5aec..7ebcac2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3341,15 +3341,13 @@ static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) vmx_get_segment(vcpu, &var, seg); var.dpl = 0x3; - var.g = 0; - var.db = 0; if (seg == VCPU_SREG_CS) var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; - if (var.limit < 0xffff) + if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; -- cgit v0.10.2 From d99e415275dd3f757b75981adad8645cdc26da45 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 20 Dec 2012 16:57:45 +0200 Subject: KVM: VMX: fix emulation of invalid guest state. Currently when emulation of invalid guest state is enable (emulate_invalid_guest_state=1) segment registers are still fixed for entry to vm86 mode some times. Segment register fixing is avoided in enter_rmode(), but vmx_set_segment() still does it unconditionally. The patch fixes it. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7ebcac2..9dff310 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -624,6 +624,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); +static bool guest_state_valid(struct kvm_vcpu *vcpu); +static u32 vmx_segment_access_rights(struct kvm_segment *var); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -2758,18 +2760,23 @@ static __exit void hardware_unsetup(void) free_kvm_area(); } -static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) +static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, + struct kvm_segment *save) { - const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; - struct kvm_segment tmp = *save; - - if (!(vmcs_readl(sf->base) == tmp.base && tmp.s)) { - tmp.base = vmcs_readl(sf->base); - tmp.selector = vmcs_read16(sf->selector); - tmp.dpl = tmp.selector & SELECTOR_RPL_MASK; - tmp.s = 1; + if (!emulate_invalid_guest_state) { + /* + * CS and SS RPL should be equal during guest entry according + * to VMX spec, but in reality it is not always so. Since vcpu + * is in the middle of the transition from real mode to + * protected mode it is safe to assume that RPL 0 is a good + * default value. 
+ */ + if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) + save->selector &= ~SELECTOR_RPL_MASK; + save->dpl = save->selector & SELECTOR_RPL_MASK; + save->s = 1; } - vmx_set_segment(vcpu, &tmp, seg); + vmx_set_segment(vcpu, save, seg); } static void enter_pmode(struct kvm_vcpu *vcpu) @@ -2777,6 +2784,17 @@ static void enter_pmode(struct kvm_vcpu *vcpu) unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); + /* + * Update real mode segment cache. It may be not up-to-date if sement + * register was written while vcpu was in a guest mode. + */ + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); + vmx->emulation_required = 1; vmx->rmode.vm86_active = 0; @@ -2794,22 +2812,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu) update_exception_bitmap(vcpu); - if (emulate_invalid_guest_state) - return; - + fix_pmode_dataseg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); + fix_pmode_dataseg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_pmode_dataseg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_pmode_dataseg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_dataseg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_dataseg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); - - vmx_segment_cache_clear(vmx); - - vmcs_write16(GUEST_SS_SELECTOR, 0); - vmcs_write32(GUEST_SS_AR_BYTES, 0x93); - - vmcs_write16(GUEST_CS_SELECTOR, - vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK); - vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); } static gva_t rmode_tss_base(struct kvm *kvm) @@ -2831,22 +2839,40 @@ static gva_t rmode_tss_base(struct kvm *kvm) static void fix_rmode_seg(int seg, struct kvm_segment *save) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; + struct kvm_segment var = *save; - vmcs_write16(sf->selector, save->base >> 4); - vmcs_write32(sf->base, save->base & 0xffff0); - vmcs_write32(sf->limit, 0xffff); - vmcs_write32(sf->ar_bytes, 0xf3); - if (save->base & 0xf) - printk_once(KERN_WARNING "kvm: segment base is not paragraph" - " aligned when entering protected mode (seg=%d)", - seg); + var.dpl = 0x3; + if (seg == VCPU_SREG_CS) + var.type = 0x3; + + if (!emulate_invalid_guest_state) { + var.selector = var.base >> 4; + var.base = var.base & 0xffff0; + var.limit = 0xffff; + var.g = 0; + var.db = 0; + var.present = 1; + var.s = 1; + var.l = 0; + var.unusable = 0; + var.type = 0x3; + var.avl = 0; + if (save->base & 0xf) + printk_once(KERN_WARNING "kvm: segment base is not " + "paragraph aligned when entering " + "protected mode (seg=%d)", seg); + } + + vmcs_write16(sf->selector, var.selector); + vmcs_write32(sf->base, var.base); + vmcs_write32(sf->limit, var.limit); + vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_segment var; if (enable_unrestricted_guest) return; @@ -2862,7 +2888,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmx->emulation_required = 1; vmx->rmode.vm86_active = 1; - /* * Very old userspace does not call KVM_SET_TSS_ADDR before entering * vcpu. Call it here with phys address pointing 16M below 4G. 
@@ -2890,28 +2915,13 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); update_exception_bitmap(vcpu); - if (emulate_invalid_guest_state) - goto continue_rmode; - - vmx_get_segment(vcpu, &var, VCPU_SREG_SS); - vmx_set_segment(vcpu, &var, VCPU_SREG_SS); + fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); + fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); + fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); + fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); + fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); + fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); - vmx_get_segment(vcpu, &var, VCPU_SREG_CS); - vmx_set_segment(vcpu, &var, VCPU_SREG_CS); - - vmx_get_segment(vcpu, &var, VCPU_SREG_ES); - vmx_set_segment(vcpu, &var, VCPU_SREG_ES); - - vmx_get_segment(vcpu, &var, VCPU_SREG_DS); - vmx_set_segment(vcpu, &var, VCPU_SREG_DS); - - vmx_get_segment(vcpu, &var, VCPU_SREG_GS); - vmx_set_segment(vcpu, &var, VCPU_SREG_GS); - - vmx_get_segment(vcpu, &var, VCPU_SREG_FS); - vmx_set_segment(vcpu, &var, VCPU_SREG_FS); - -continue_rmode: kvm_mmu_reset_context(vcpu); } @@ -3278,7 +3288,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, vmcs_write16(sf->selector, var->selector); else if (var->s) fix_rmode_seg(seg, &vmx->rmode.segs[seg]); - return; + goto out; } vmcs_writel(sf->base, var->base); @@ -3300,6 +3310,10 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, var->type |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); + +out: + if (!vmx->emulation_required) + vmx->emulation_required = !guest_state_valid(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) -- cgit v0.10.2 From d54d07b2ca19a2908aa89e0c67715ca2e8e62a4c Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 20 Dec 2012 16:57:46 +0200 Subject: KVM: VMX: Do not fix segment register during vcpu initialization. Segment registers will be fixed according to current emulation policy during switching to real mode for the first time. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9dff310..a101dd4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3621,12 +3621,9 @@ static void seg_setup(int seg) vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); - if (enable_unrestricted_guest) { - ar = 0x93; - if (seg == VCPU_SREG_CS) - ar |= 0x08; /* code segment */ - } else - ar = 0xf3; + ar = 0x93; + if (seg == VCPU_SREG_CS) + ar |= 0x08; /* code segment */ vmcs_write32(sf->ar_bytes, ar); } @@ -3967,14 +3964,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmx_segment_cache_clear(vmx); seg_setup(VCPU_SREG_CS); - /* - * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode - * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. - */ - if (kvm_vcpu_is_bsp(&vmx->vcpu)) { + if (kvm_vcpu_is_bsp(&vmx->vcpu)) vmcs_write16(GUEST_CS_SELECTOR, 0xf000); - vmcs_writel(GUEST_CS_BASE, 0x000f0000); - } else { + else { vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); } -- cgit v0.10.2 From 0ca1b4f4ba3a9f75bb099ccaf6c4bd8bb6db7a74 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 20 Dec 2012 16:57:47 +0200 Subject: KVM: VMX: handle IO when emulation is due to #GP in real mode. 
With emulate_invalid_guest_state=0 if a vcpu is in real mode VMX can enter the vcpu with smaller segment limit than guest configured. If the guest tries to access pass this limit it will get #GP at which point instruction will be emulated with correct segment limit applied. If during the emulation IO is detected it is not handled correctly. Vcpu thread should exit to userspace to serve the IO, but it returns to the guest instead. Since emulation is not completed till userspace completes the IO the faulty instruction is re-executed ad infinitum. The patch fixes that by exiting to userspace if IO happens during instruction emulation. Reported-by: Alex Williamson Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a101dd4..55dfc37 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4230,28 +4230,9 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) return 0; } -static int handle_rmode_exception(struct kvm_vcpu *vcpu, - int vec, u32 err_code) +static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) { - /* - * Instruction with address size override prefix opcode 0x67 - * Cause the #SS fault with 0 error code in VM86 mode. - */ - if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) - if (emulate_instruction(vcpu, 0) == EMULATE_DONE) - return 1; - /* - * Forward all other exceptions that are valid in real mode. - * FIXME: Breaks guest debugging in real mode, needs to be fixed with - * the required debugging infrastructure rework. - */ switch (vec) { - case DB_VECTOR: - if (vcpu->guest_debug & - (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) - return 0; - kvm_queue_exception(vcpu, vec); - return 1; case BP_VECTOR: /* * Update instruction length as we may reinject the exception @@ -4260,7 +4241,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) - return 0; + return false; + /* fall through */ + case DB_VECTOR: + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + return false; /* fall through */ case DE_VECTOR: case OF_VECTOR: @@ -4270,10 +4256,37 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, case SS_VECTOR: case GP_VECTOR: case MF_VECTOR: - kvm_queue_exception(vcpu, vec); - return 1; + return true; + break; } - return 0; + return false; +} + +static int handle_rmode_exception(struct kvm_vcpu *vcpu, + int vec, u32 err_code) +{ + /* + * Instruction with address size override prefix opcode 0x67 + * Cause the #SS fault with 0 error code in VM86 mode. + */ + if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { + if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + return kvm_emulate_halt(vcpu); + } + return 1; + } + return 0; + } + + /* + * Forward all other exceptions that are valid in real mode. + * FIXME: Breaks guest debugging in real mode, needs to be fixed with + * the required debugging infrastructure rework. 
+ */ + kvm_queue_exception(vcpu, vec); + return 1; } /* @@ -4361,17 +4374,11 @@ static int handle_exception(struct kvm_vcpu *vcpu) return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); } - if (vmx->rmode.vm86_active && - handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, - error_code)) { - if (vcpu->arch.halt_request) { - vcpu->arch.halt_request = 0; - return kvm_emulate_halt(vcpu); - } - return 1; - } - ex_no = intr_info & INTR_INFO_VECTOR_MASK; + + if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) + return handle_rmode_exception(vcpu, ex_no, error_code); + switch (ex_no) { case DB_VECTOR: dr6 = vmcs_readl(EXIT_QUALIFICATION); -- cgit v0.10.2 From 77975357956c6450dd7ac3dfe572c1a8f0014c54 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:06 +0100 Subject: KVM: s390: Constify intercept handler tables. These tables are never modified. Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 22798ec..0c08b3f 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -104,7 +104,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) return 0; } -static intercept_handler_t instruction_handlers[256] = { +static const intercept_handler_t instruction_handlers[256] = { [0x01] = kvm_s390_handle_01, [0x83] = kvm_s390_handle_diag, [0xae] = kvm_s390_handle_sigp, diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d768906..1aeb933 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -297,7 +297,7 @@ out_fail: return 0; } -static intercept_handler_t priv_handlers[256] = { +static const intercept_handler_t priv_handlers[256] = { [0x02] = handle_stidp, [0x10] = handle_set_prefix, [0x11] = handle_store_prefix, @@ -405,7 +405,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu) return 0; } -static intercept_handler_t x01_handlers[256] = { +static const intercept_handler_t x01_handlers[256] = { [0x07] = handle_sckpf, }; -- cgit v0.10.2 From b1c571a50dfacf25a24c23271e9b8bf18ff6b102 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:07 +0100 Subject: KVM: s390: Decoding helper functions. Introduce helper functions for decoding the various base/displacement instruction formats. 
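As an illustration of what these helpers compute, the sketch below mirrors the RSY-format decode (base register number in the top nibble of the IPB, a 12-bit DL plus an 8-bit DH displacement field) on a made-up IPB value and register file. toy_get_base_disp_rsy() is not the kernel helper; only the bit layout follows the patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-alone mirror of the kvm_s390_get_base_disp_rsy() bit layout. */
static uint64_t toy_get_base_disp_rsy(uint32_t ipb, const uint64_t *gprs)
{
        unsigned int base2 = ipb >> 28;
        unsigned int disp2 = ((ipb & 0x0fff0000) >> 16) +
                             ((ipb & 0x0000ff00) << 4);

        return (base2 ? gprs[base2] : 0) + disp2;
}

int main(void)
{
        uint64_t gprs[16] = { 0 };
        /* Base register 5, DL = 0x123, DH = 0x01 -> displacement 0x1123. */
        uint32_t ipb = (5u << 28) | (0x123u << 16) | (0x01u << 8);

        gprs[5] = 0x2000;
        printf("address=0x%llx\n",
               (unsigned long long)toy_get_base_disp_rsy(ipb, gprs));
        return 0;
}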
Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 0c08b3f..df6c0ad 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -26,9 +26,6 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + - ((vcpu->arch.sie_block->ipb & 0xff00) << 4); u64 useraddr; int reg, rc; @@ -36,17 +33,15 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) return -EOPNOTSUPP; - useraddr = disp2; - if (base2) - useraddr += vcpu->run->s.regs.gprs[base2]; + useraddr = kvm_s390_get_base_disp_rsy(vcpu); if (useraddr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); reg = reg1; - VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, - disp2); + VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, + useraddr); trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr); do { @@ -68,23 +63,19 @@ static int handle_lctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 useraddr; u32 val = 0; int reg, rc; vcpu->stat.instruction_lctl++; - useraddr = disp2; - if (base2) - useraddr += vcpu->run->s.regs.gprs[base2]; + useraddr = kvm_s390_get_base_disp_rs(vcpu); if (useraddr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, - disp2); + VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, + useraddr); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr); reg = reg1; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index d75bc5e..dccc024 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -65,6 +65,43 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) vcpu->arch.sie_block->ihcpu = 0xffff; } +static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) +{ + int base2 = vcpu->arch.sie_block->ipb >> 28; + int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; +} + +static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, + u64 *address1, u64 *address2) +{ + int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; + int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; + int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; + int disp2 = vcpu->arch.sie_block->ipb & 0x0fff; + + *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; + *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; +} + +static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) +{ + int base2 = vcpu->arch.sie_block->ipb >> 28; + int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + + ((vcpu->arch.sie_block->ipb & 0xff00) << 4); + + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; +} + +static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) +{ + int base2 = vcpu->arch.sie_block->ipb >> 28; + int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + + return (base2 ? 
vcpu->run->s.regs.gprs[base2] : 0) + disp2; +} + int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); void kvm_s390_tasklet(unsigned long parm); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 1aeb933..d715842 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -24,17 +24,13 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; u32 address = 0; u8 tmp; vcpu->stat.instruction_spx++; - operand2 = disp2; - if (base2) - operand2 += vcpu->run->s.regs.gprs[base2]; + operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) { @@ -67,15 +63,12 @@ out: static int handle_store_prefix(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; u32 address; vcpu->stat.instruction_stpx++; - operand2 = disp2; - if (base2) - operand2 += vcpu->run->s.regs.gprs[base2]; + + operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) { @@ -100,15 +93,12 @@ out: static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 useraddr; int rc; vcpu->stat.instruction_stap++; - useraddr = disp2; - if (base2) - useraddr += vcpu->run->s.regs.gprs[base2]; + + useraddr = kvm_s390_get_base_disp_s(vcpu); if (useraddr & 1) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -178,15 +168,12 @@ static int handle_stfl(struct kvm_vcpu *vcpu) static int handle_stidp(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; int rc; vcpu->stat.instruction_stidp++; - operand2 = disp2; - if (base2) - operand2 += vcpu->run->s.regs.gprs[base2]; + + operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 7) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -240,17 +227,13 @@ static int handle_stsi(struct kvm_vcpu *vcpu) int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; unsigned long mem; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); - operand2 = disp2; - if (base2) - operand2 += vcpu->run->s.regs.gprs[base2]; + operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 0xfff && fc > 0) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -335,17 +318,14 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) static int handle_tprot(struct kvm_vcpu *vcpu) { - int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; - int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; - int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; - int disp2 = vcpu->arch.sie_block->ipb & 0x0fff; - u64 address1 = disp1 + base1 ? vcpu->run->s.regs.gprs[base1] : 0; - u64 address2 = disp2 + base2 ? 
vcpu->run->s.regs.gprs[base2] : 0; + u64 address1, address2; struct vm_area_struct *vma; unsigned long user_address; vcpu->stat.instruction_tprot++; + kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); + /* we only handle the Linux memory detection case: * access key == 0 * guest DAT == off diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 566ddf6..461e841 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -324,8 +324,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) { int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int r3 = vcpu->arch.sie_block->ipa & 0x000f; - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u32 parameter; u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; u8 order_code; @@ -336,9 +334,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); - order_code = disp2; - if (base2) - order_code += vcpu->run->s.regs.gprs[base2]; + order_code = kvm_s390_get_base_disp_rs(vcpu); if (r1 % 2) parameter = vcpu->run->s.regs.gprs[r1]; -- cgit v0.10.2 From d8346b7d9bab37e6cc712ff1622c65ff98bdfef8 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:08 +0100 Subject: KVM: s390: Support for I/O interrupts. Add support for handling I/O interrupts (standard, subchannel-related ones and rudimentary adapter interrupts). The subchannel-identifying parameters are encoded into the interrupt type. I/O interrupts are floating, so they can't be injected on a specific vcpu. Reviewed-by: Alexander Graf Reviewed-by: Marcelo Tosatti Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index a4df553..83bd92b 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2069,6 +2069,10 @@ KVM_S390_INT_VIRTIO (vm) - virtio external interrupt; external interrupt KVM_S390_INT_SERVICE (vm) - sclp external interrupt; sclp parameter in parm KVM_S390_INT_EMERGENCY (vcpu) - sigp emergency; source cpu in parm KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm +KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm) - compound value to indicate an + I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel); + I/O interruption parameters in parm (subchannel) and parm64 (intparm, + interruption subclass) Note that the vcpu ioctl is asynchronous to vcpu execution. 
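A worked example of the compound interrupt type: the macro below mirrors the KVM_S390_INT_IO() encoding added to the uapi header later in this patch, and the cssid/ssid/schid values are arbitrary sample inputs.

#include <stdio.h>

/* Mirror of the KVM_S390_INT_IO() bit layout, for illustration only. */
#define TOY_S390_INT_IO(ai, cssid, ssid, schid) \
        ((schid) | ((ssid) << 16) | ((cssid) << 18) | ((ai) << 26))

int main(void)
{
        /* Non-adapter interrupt for cssid 0xfe, ssid 1, schid 0x4711. */
        unsigned int type = TOY_S390_INT_IO(0, 0xfe, 1, 0x4711);

        printf("type=0x%08x\n", type);  /* prints 0x03f94711 */
        return 0;
}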
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 711c5ab..a8e35c4 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -74,6 +74,7 @@ struct kvm_s390_sie_block { __u64 epoch; /* 0x0038 */ __u8 reserved40[4]; /* 0x0040 */ #define LCTL_CR0 0x8000 +#define LCTL_CR6 0x0200 __u16 lctl; /* 0x0044 */ __s16 icpua; /* 0x0046 */ __u32 ictl; /* 0x0048 */ @@ -125,6 +126,7 @@ struct kvm_vcpu_stat { u32 deliver_prefix_signal; u32 deliver_restart_signal; u32 deliver_program_int; + u32 deliver_io_int; u32 exit_wait_state; u32 instruction_stidp; u32 instruction_spx; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c30615e..52cdf20 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -21,11 +21,26 @@ #include "gaccess.h" #include "trace-s390.h" +#define IOINT_SCHID_MASK 0x0000ffff +#define IOINT_SSID_MASK 0x00030000 +#define IOINT_CSSID_MASK 0x03fc0000 +#define IOINT_AI_MASK 0x04000000 + +static int is_ioint(u64 type) +{ + return ((type & 0xfffe0000u) != 0xfffe0000u); +} + static int psw_extint_disabled(struct kvm_vcpu *vcpu) { return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); } +static int psw_ioint_disabled(struct kvm_vcpu *vcpu) +{ + return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); +} + static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) { if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || @@ -67,7 +82,15 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, case KVM_S390_SIGP_SET_PREFIX: case KVM_S390_RESTART: return 1; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + if (psw_ioint_disabled(vcpu)) + return 0; + if (vcpu->arch.sie_block->gcr[6] & inti->io.io_int_word) + return 1; + return 0; default: + printk(KERN_WARNING "illegal interrupt type %llx\n", + inti->type); BUG(); } return 0; @@ -116,6 +139,12 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, case KVM_S390_SIGP_STOP: __set_cpuflag(vcpu, CPUSTAT_STOP_INT); break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + if (psw_ioint_disabled(vcpu)) + __set_cpuflag(vcpu, CPUSTAT_IO_INT); + else + vcpu->arch.sie_block->lctl |= LCTL_CR6; + break; default: BUG(); } @@ -297,6 +326,47 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, exception = 1; break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + { + __u32 param0 = ((__u32)inti->io.subchannel_id << 16) | + inti->io.subchannel_nr; + __u64 param1 = ((__u64)inti->io.io_int_parm << 32) | + inti->io.io_int_word; + VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); + vcpu->stat.deliver_io_int++; + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, + param0, param1); + rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID, + inti->io.subchannel_id); + if (rc == -EFAULT) + exception = 1; + + rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR, + inti->io.subchannel_nr); + if (rc == -EFAULT) + exception = 1; + + rc = put_guest_u32(vcpu, __LC_IO_INT_PARM, + inti->io.io_int_parm); + if (rc == -EFAULT) + exception = 1; + + rc = put_guest_u32(vcpu, __LC_IO_INT_WORD, + inti->io.io_int_word); + if (rc == -EFAULT) + exception = 1; + + rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + if (rc == -EFAULT) + exception = 1; + + rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_IO_NEW_PSW, sizeof(psw_t)); + if (rc == -EFAULT) + exception = 1; + break; + } default: BUG(); } @@ -545,7 +615,7 @@ int kvm_s390_inject_vm(struct kvm *kvm, { struct kvm_s390_local_interrupt *li; struct 
kvm_s390_float_interrupt *fi; - struct kvm_s390_interrupt_info *inti; + struct kvm_s390_interrupt_info *inti, *iter; int sigcpu; inti = kzalloc(sizeof(*inti), GFP_KERNEL); @@ -569,6 +639,22 @@ int kvm_s390_inject_vm(struct kvm *kvm, case KVM_S390_SIGP_STOP: case KVM_S390_INT_EXTERNAL_CALL: case KVM_S390_INT_EMERGENCY: + kfree(inti); + return -EINVAL; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: + if (s390int->type & IOINT_AI_MASK) + VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); + else + VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", + s390int->type & IOINT_CSSID_MASK, + s390int->type & IOINT_SSID_MASK, + s390int->type & IOINT_SCHID_MASK); + inti->type = s390int->type; + inti->io.subchannel_id = s390int->parm >> 16; + inti->io.subchannel_nr = s390int->parm & 0x0000ffffu; + inti->io.io_int_parm = s390int->parm64 >> 32; + inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; + break; default: kfree(inti); return -EINVAL; @@ -579,7 +665,19 @@ int kvm_s390_inject_vm(struct kvm *kvm, mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); - list_add_tail(&inti->list, &fi->list); + if (!is_ioint(inti->type)) + list_add_tail(&inti->list, &fi->list); + else { + /* Keep I/O interrupts sorted in isc order. */ + list_for_each_entry(iter, &fi->list, list) { + if (!is_ioint(iter->type)) + continue; + if (iter->io.io_int_word <= inti->io.io_int_word) + continue; + break; + } + list_add_tail(&inti->list, &iter->list); + } atomic_set(&fi->active, 1); sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); if (sigcpu == KVM_MAX_VCPUS) { @@ -653,6 +751,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, break; case KVM_S390_INT_VIRTIO: case KVM_S390_INT_SERVICE: + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: default: kfree(inti); return -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e6e5d4b..54540bd 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -401,6 +401,15 @@ struct kvm_s390_psw { #define KVM_S390_INT_SERVICE 0xffff2401u #define KVM_S390_INT_EMERGENCY 0xffff1201u #define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u +/* Anything below 0xfffe0000u is taken by INT_IO */ +#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \ + (((schid)) | \ + ((ssid) << 16) | \ + ((cssid) << 18) | \ + ((ai) << 26)) +#define KVM_S390_INT_IO_MIN 0x00000000u +#define KVM_S390_INT_IO_MAX 0xfffdffffu + struct kvm_s390_interrupt { __u32 type; -- cgit v0.10.2 From 48a3e950f4cee6a345ffbe9baf599f1e9a54c479 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:09 +0100 Subject: KVM: s390: Add support for machine checks. Add support for injecting machine checks (only repressible conditions for now). This is a bit more involved than I/O interrupts, for these reasons: - Machine checks come in both floating and cpu varieties. - We don't have a bit for machine checks enabling, but have to use a roundabout approach with trapping PSW changing instructions and watching for opened machine checks. 
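As a hedged illustration (not part of the patch), queueing such a machine check from userspace uses the same kvm_s390_interrupt layout and headers as the I/O sketch above; fd, cr14_bits and mcic are placeholders:

	/* fd may be a VM fd (floating) or a vcpu fd (per-cpu condition). */
	static int inject_mchk(int fd, __u32 cr14_bits, __u64 mcic)
	{
		struct kvm_s390_interrupt mchk = {
			.type   = KVM_S390_MCHK,
			.parm   = cr14_bits,	/* CR14 bits describing the condition */
			.parm64 = mcic,		/* machine check interrupt code */
		};

		return ioctl(fd, KVM_S390_INTERRUPT, &mchk);
	}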
Reviewed-by: Alexander Graf Reviewed-by: Marcelo Tosatti Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 83bd92b..8a0de30 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2073,6 +2073,10 @@ KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm) - compound value to indicate an I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel); I/O interruption parameters in parm (subchannel) and parm64 (intparm, interruption subclass) +KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm, + machine check interrupt code in parm64 (note that + machine checks needing further payload are not + supported by this ioctl) Note that the vcpu ioctl is asynchronous to vcpu execution. diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a8e35c4..29363d1 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -75,8 +75,10 @@ struct kvm_s390_sie_block { __u8 reserved40[4]; /* 0x0040 */ #define LCTL_CR0 0x8000 #define LCTL_CR6 0x0200 +#define LCTL_CR14 0x0002 __u16 lctl; /* 0x0044 */ __s16 icpua; /* 0x0046 */ +#define ICTL_LPSW 0x00400000 __u32 ictl; /* 0x0048 */ __u32 eca; /* 0x004c */ __u8 icptcode; /* 0x0050 */ @@ -187,6 +189,11 @@ struct kvm_s390_emerg_info { __u16 code; }; +struct kvm_s390_mchk_info { + __u64 cr14; + __u64 mcic; +}; + struct kvm_s390_interrupt_info { struct list_head list; u64 type; @@ -197,6 +204,7 @@ struct kvm_s390_interrupt_info { struct kvm_s390_emerg_info emerg; struct kvm_s390_extcall_info extcall; struct kvm_s390_prefix_info prefix; + struct kvm_s390_mchk_info mchk; }; }; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index df6c0ad..950c13e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -97,10 +97,12 @@ static int handle_lctl(struct kvm_vcpu *vcpu) static const intercept_handler_t instruction_handlers[256] = { [0x01] = kvm_s390_handle_01, + [0x82] = kvm_s390_handle_lpsw, [0x83] = kvm_s390_handle_diag, [0xae] = kvm_s390_handle_sigp, [0xb2] = kvm_s390_handle_b2, [0xb7] = handle_lctl, + [0xb9] = kvm_s390_handle_b9, [0xe5] = kvm_s390_handle_e5, [0xeb] = handle_lctlg, }; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 52cdf20..b3b4748 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -41,6 +41,11 @@ static int psw_ioint_disabled(struct kvm_vcpu *vcpu) return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); } +static int psw_mchk_disabled(struct kvm_vcpu *vcpu) +{ + return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); +} + static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) { if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || @@ -82,6 +87,12 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, case KVM_S390_SIGP_SET_PREFIX: case KVM_S390_RESTART: return 1; + case KVM_S390_MCHK: + if (psw_mchk_disabled(vcpu)) + return 0; + if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) + return 1; + return 0; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: if (psw_ioint_disabled(vcpu)) return 0; @@ -116,6 +127,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); vcpu->arch.sie_block->lctl = 0x0000; + vcpu->arch.sie_block->ictl &= ~ICTL_LPSW; } static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) @@ -139,6 +151,12 @@ static void 
__set_intercept_indicator(struct kvm_vcpu *vcpu, case KVM_S390_SIGP_STOP: __set_cpuflag(vcpu, CPUSTAT_STOP_INT); break; + case KVM_S390_MCHK: + if (psw_mchk_disabled(vcpu)) + vcpu->arch.sie_block->ictl |= ICTL_LPSW; + else + vcpu->arch.sie_block->lctl |= LCTL_CR14; + break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: if (psw_ioint_disabled(vcpu)) __set_cpuflag(vcpu, CPUSTAT_IO_INT); @@ -326,6 +344,32 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, exception = 1; break; + case KVM_S390_MCHK: + VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", + inti->mchk.mcic); + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, + inti->mchk.cr14, + inti->mchk.mcic); + rc = kvm_s390_vcpu_store_status(vcpu, + KVM_S390_STORE_STATUS_PREFIXED); + if (rc == -EFAULT) + exception = 1; + + rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic); + if (rc == -EFAULT) + exception = 1; + + rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW, + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); + if (rc == -EFAULT) + exception = 1; + + rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, + __LC_MCK_NEW_PSW, sizeof(psw_t)); + if (rc == -EFAULT) + exception = 1; + break; + case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: { __u32 param0 = ((__u32)inti->io.subchannel_id << 16) | @@ -588,6 +632,61 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) } } +void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; + struct kvm_s390_interrupt_info *n, *inti = NULL; + int deliver; + + __reset_intercept_indicators(vcpu); + if (atomic_read(&li->active)) { + do { + deliver = 0; + spin_lock_bh(&li->lock); + list_for_each_entry_safe(inti, n, &li->list, list) { + if ((inti->type == KVM_S390_MCHK) && + __interrupt_is_deliverable(vcpu, inti)) { + list_del(&inti->list); + deliver = 1; + break; + } + __set_intercept_indicator(vcpu, inti); + } + if (list_empty(&li->list)) + atomic_set(&li->active, 0); + spin_unlock_bh(&li->lock); + if (deliver) { + __do_deliver_interrupt(vcpu, inti); + kfree(inti); + } + } while (deliver); + } + + if (atomic_read(&fi->active)) { + do { + deliver = 0; + spin_lock(&fi->lock); + list_for_each_entry_safe(inti, n, &fi->list, list) { + if ((inti->type == KVM_S390_MCHK) && + __interrupt_is_deliverable(vcpu, inti)) { + list_del(&inti->list); + deliver = 1; + break; + } + __set_intercept_indicator(vcpu, inti); + } + if (list_empty(&fi->list)) + atomic_set(&fi->active, 0); + spin_unlock(&fi->lock); + if (deliver) { + __do_deliver_interrupt(vcpu, inti); + kfree(inti); + } + } while (deliver); + } +} + int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -641,6 +740,13 @@ int kvm_s390_inject_vm(struct kvm *kvm, case KVM_S390_INT_EMERGENCY: kfree(inti); return -EINVAL; + case KVM_S390_MCHK: + VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", + s390int->parm64); + inti->type = s390int->type; + inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ + inti->mchk.mcic = s390int->parm64; + break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: if (s390int->type & IOINT_AI_MASK) VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); @@ -749,6 +855,12 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, inti->type = s390int->type; inti->emerg.code = s390int->parm; break; + case KVM_S390_MCHK: + VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", + 
s390int->parm64); + inti->type = s390int->type; + inti->mchk.mcic = s390int->parm64; + break; case KVM_S390_INT_VIRTIO: case KVM_S390_INT_SERVICE: case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index dccc024..1f7cc6c 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -106,6 +106,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); void kvm_s390_tasklet(unsigned long parm); void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); +void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); int kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int); int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, @@ -117,6 +118,8 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); int kvm_s390_handle_01(struct kvm_vcpu *vcpu); +int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); +int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); /* implemented in sigp.c */ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d715842..d3cbcd3 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include "gaccess.h" #include "kvm-s390.h" #include "trace.h" @@ -166,6 +168,99 @@ static int handle_stfl(struct kvm_vcpu *vcpu) return 0; } +static void handle_new_psw(struct kvm_vcpu *vcpu) +{ + /* Check whether the new psw is enabled for machine checks. */ + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK) + kvm_s390_deliver_pending_machine_checks(vcpu); +} + +#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) +#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL +#define PSW_ADDR_24 0x00000000000fffffUL +#define PSW_ADDR_31 0x000000007fffffffUL + +int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) +{ + u64 addr; + psw_compat_t new_psw; + + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, + PGM_PRIVILEGED_OPERATION); + + addr = kvm_s390_get_base_disp_s(vcpu); + + if (addr & 7) { + kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + goto out; + } + + if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) { + kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + goto out; + } + + if (!(new_psw.mask & PSW32_MASK_BASE)) { + kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + goto out; + } + + vcpu->arch.sie_block->gpsw.mask = + (new_psw.mask & ~PSW32_MASK_BASE) << 32; + vcpu->arch.sie_block->gpsw.addr = new_psw.addr; + + if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) || + (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && + (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) || + ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == + PSW_MASK_EA)) { + kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + goto out; + } + + handle_new_psw(vcpu); +out: + return 0; +} + +static int handle_lpswe(struct kvm_vcpu *vcpu) +{ + u64 addr; + psw_t new_psw; + + addr = kvm_s390_get_base_disp_s(vcpu); + + if (addr & 7) { + kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + goto out; + } + + if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) { + kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + goto out; + } + + vcpu->arch.sie_block->gpsw.mask = new_psw.mask; + vcpu->arch.sie_block->gpsw.addr = new_psw.addr; + + if 
((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) || + (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == + PSW_MASK_BA) && + (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) || + (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) && + (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) || + ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) == + PSW_MASK_EA)) { + kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + goto out; + } + + handle_new_psw(vcpu); +out: + return 0; +} + static int handle_stidp(struct kvm_vcpu *vcpu) { u64 operand2; @@ -292,6 +387,7 @@ static const intercept_handler_t priv_handlers[256] = { [0x5f] = handle_chsc, [0x7d] = handle_stsi, [0xb1] = handle_stfl, + [0xb2] = handle_lpswe, }; int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) @@ -316,6 +412,45 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } +static int handle_epsw(struct kvm_vcpu *vcpu) +{ + int reg1, reg2; + + reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24; + reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; + + /* This basically extracts the mask half of the psw. */ + vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000; + vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; + if (reg2) { + vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000; + vcpu->run->s.regs.gprs[reg2] |= + vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff; + } + return 0; +} + +static const intercept_handler_t b9_handlers[256] = { + [0x8d] = handle_epsw, +}; + +int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) +{ + intercept_handler_t handler; + + /* This is handled just as for the B2 instructions. */ + handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; + if (handler) { + if ((handler != handle_epsw) && + (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)) + return kvm_s390_inject_program_int(vcpu, + PGM_PRIVILEGED_OPERATION); + else + return handler(vcpu); + } + return -EOPNOTSUPP; +} + static int handle_tprot(struct kvm_vcpu *vcpu) { u64 address1, address2; diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 90fdf85..95fbc1ab 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -141,13 +141,13 @@ TRACE_EVENT(kvm_s390_inject_vcpu, * Trace point for the actual delivery of interrupts. 
*/ TRACE_EVENT(kvm_s390_deliver_interrupt, - TP_PROTO(unsigned int id, __u64 type, __u32 data0, __u64 data1), + TP_PROTO(unsigned int id, __u64 type, __u64 data0, __u64 data1), TP_ARGS(id, type, data0, data1), TP_STRUCT__entry( __field(int, id) __field(__u32, inttype) - __field(__u32, data0) + __field(__u64, data0) __field(__u64, data1) ), @@ -159,7 +159,7 @@ TRACE_EVENT(kvm_s390_deliver_interrupt, ), TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \ - "data:%08x %016llx", + "data:%08llx %016llx", __entry->id, __entry->inttype, __print_symbolic(__entry->inttype, kvm_s390_int_type), __entry->data0, __entry->data1) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 54540bd..80bb3b8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -397,6 +397,7 @@ struct kvm_s390_psw { #define KVM_S390_PROGRAM_INT 0xfffe0001u #define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u #define KVM_S390_RESTART 0xfffe0003u +#define KVM_S390_MCHK 0xfffe1000u #define KVM_S390_INT_VIRTIO 0xffff2603u #define KVM_S390_INT_SERVICE 0xffff2401u #define KVM_S390_INT_EMERGENCY 0xffff1201u -- cgit v0.10.2 From f379aae558b8daff0f7a1c5fc225af5d35c741f7 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:10 +0100 Subject: KVM: s390: In-kernel handling of I/O instructions. Explicitely catch all channel I/O related instructions intercepts in the kernel and set condition code 3 for them. This paves the way for properly handling these instructions later on. Note: This is not architecture compliant (the previous code wasn't either) since setting cc 3 is not the correct thing to do for some of these instructions. For Linux guests, however, it still has the intended effect of stopping css probing. Reviewed-by: Marcelo Tosatti Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 950c13e..71af87d 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -30,8 +30,6 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) int reg, rc; vcpu->stat.instruction_lctlg++; - if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) - return -EOPNOTSUPP; useraddr = kvm_s390_get_base_disp_rsy(vcpu); @@ -95,6 +93,21 @@ static int handle_lctl(struct kvm_vcpu *vcpu) return 0; } +static const intercept_handler_t eb_handlers[256] = { + [0x2f] = handle_lctlg, + [0x8a] = kvm_s390_handle_priv_eb, +}; + +static int handle_eb(struct kvm_vcpu *vcpu) +{ + intercept_handler_t handler; + + handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; + if (handler) + return handler(vcpu); + return -EOPNOTSUPP; +} + static const intercept_handler_t instruction_handlers[256] = { [0x01] = kvm_s390_handle_01, [0x82] = kvm_s390_handle_lpsw, @@ -104,7 +117,7 @@ static const intercept_handler_t instruction_handlers[256] = { [0xb7] = handle_lctl, [0xb9] = kvm_s390_handle_b9, [0xe5] = kvm_s390_handle_e5, - [0xeb] = handle_lctlg, + [0xeb] = handle_eb, }; static int handle_noop(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 1f7cc6c..211b340 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -120,6 +120,7 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); int kvm_s390_handle_01(struct kvm_vcpu *vcpu); int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); +int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu); /* implemented in sigp.c */ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); diff --git 
a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d3cbcd3..8ad776f 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -127,20 +127,9 @@ static int handle_skey(struct kvm_vcpu *vcpu) return 0; } -static int handle_stsch(struct kvm_vcpu *vcpu) +static int handle_io_inst(struct kvm_vcpu *vcpu) { - vcpu->stat.instruction_stsch++; - VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3"); - /* condition code 3 */ - vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); - vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; - return 0; -} - -static int handle_chsc(struct kvm_vcpu *vcpu) -{ - vcpu->stat.instruction_chsc++; - VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3"); + VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); /* condition code 3 */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; @@ -375,7 +364,7 @@ out_fail: return 0; } -static const intercept_handler_t priv_handlers[256] = { +static const intercept_handler_t b2_handlers[256] = { [0x02] = handle_stidp, [0x10] = handle_set_prefix, [0x11] = handle_store_prefix, @@ -383,8 +372,22 @@ static const intercept_handler_t priv_handlers[256] = { [0x29] = handle_skey, [0x2a] = handle_skey, [0x2b] = handle_skey, - [0x34] = handle_stsch, - [0x5f] = handle_chsc, + [0x30] = handle_io_inst, + [0x31] = handle_io_inst, + [0x32] = handle_io_inst, + [0x33] = handle_io_inst, + [0x34] = handle_io_inst, + [0x35] = handle_io_inst, + [0x36] = handle_io_inst, + [0x37] = handle_io_inst, + [0x38] = handle_io_inst, + [0x39] = handle_io_inst, + [0x3a] = handle_io_inst, + [0x3b] = handle_io_inst, + [0x3c] = handle_io_inst, + [0x5f] = handle_io_inst, + [0x74] = handle_io_inst, + [0x76] = handle_io_inst, [0x7d] = handle_stsi, [0xb1] = handle_stfl, [0xb2] = handle_lpswe, @@ -401,7 +404,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) * state bit and (a) handle the instruction or (b) send a code 2 * program check. * Anything else goes to userspace.*/ - handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; + handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) { if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, @@ -432,6 +435,7 @@ static int handle_epsw(struct kvm_vcpu *vcpu) static const intercept_handler_t b9_handlers[256] = { [0x8d] = handle_epsw, + [0x9c] = handle_io_inst, }; int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) @@ -451,6 +455,24 @@ int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } +static const intercept_handler_t eb_handlers[256] = { + [0x8a] = handle_io_inst, +}; + +int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu) +{ + intercept_handler_t handler; + + /* All eb instructions that end up here are privileged. */ + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, + PGM_PRIVILEGED_OPERATION); + handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; + if (handler) + return handler(vcpu); + return -EOPNOTSUPP; +} + static int handle_tprot(struct kvm_vcpu *vcpu) { u64 address1, address2; -- cgit v0.10.2 From d6712df95bcfea597fc3ea2405ec13e8b69a7b8c Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:11 +0100 Subject: KVM: s390: Base infrastructure for enabling capabilities. Make s390 support KVM_ENABLE_CAP. 
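For illustration, a userspace sketch of the resulting call sequence; vcpu_fd is a placeholder, and KVM_CAP_S390_CSS_SUPPORT is only accepted once a later patch in this series wires it up:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int enable_css_support(int vcpu_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_S390_CSS_SUPPORT,	/* flags must remain 0 */
		};

		return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
	}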
Reviewed-by: Marcelo Tosatti Acked-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 8a0de30..73bd159 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -913,7 +913,7 @@ documentation when it pops into existence). 4.37 KVM_ENABLE_CAP Capability: KVM_CAP_ENABLE_CAP -Architectures: ppc +Architectures: ppc, s390 Type: vcpu ioctl Parameters: struct kvm_enable_cap (in) Returns: 0 on success; -1 on error diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f718bc6..5ff2603 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -140,6 +140,7 @@ int kvm_dev_ioctl_check_extension(long ext) #endif case KVM_CAP_SYNC_REGS: case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: r = 1; break; case KVM_CAP_NR_VCPUS: @@ -808,6 +809,22 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return 0; } +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + int r; + + if (cap->flags) + return -EINVAL; + + switch (cap->cap) { + default: + r = -EINVAL; + break; + } + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -894,6 +911,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = 0; break; } + case KVM_ENABLE_CAP: + { + struct kvm_enable_cap cap; + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + break; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } default: r = -ENOTTY; } -- cgit v0.10.2 From fa6b7fe9928d50444c29b29c8563746c6b0c6299 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 20 Dec 2012 15:32:12 +0100 Subject: KVM: s390: Add support for channel I/O instructions. Add a new capability, KVM_CAP_S390_CSS_SUPPORT, which will pass intercepts for channel I/O instructions to userspace. Only I/O instructions interacting with I/O interrupts need to be handled in-kernel: - TEST PENDING INTERRUPTION (tpi) dequeues and stores pending interrupts entirely in-kernel. - TEST SUBCHANNEL (tsch) dequeues pending interrupts in-kernel and exits via KVM_EXIT_S390_TSCH to userspace for subchannel- related processing. Reviewed-by: Marcelo Tosatti Reviewed-by: Alexander Graf Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 73bd159..f2d6391 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2350,6 +2350,22 @@ The possible hypercalls are defined in the Power Architecture Platform Requirements (PAPR) document available from www.power.org (free developer registration required to access it). + /* KVM_EXIT_S390_TSCH */ + struct { + __u16 subchannel_id; + __u16 subchannel_nr; + __u32 io_int_parm; + __u32 io_int_word; + __u32 ipb; + __u8 dequeued; + } s390_tsch; + +s390 specific. This exit occurs when KVM_CAP_S390_CSS_SUPPORT has been enabled +and TEST SUBCHANNEL was intercepted. If dequeued is set, a pending I/O +interrupt for the target subchannel has been dequeued and subchannel_id, +subchannel_nr, io_int_parm and io_int_word contain the parameters for that +interrupt. ipb is needed for instruction parameter decoding. + /* Fix the size of the union. */ char padding[256]; }; @@ -2471,3 +2487,17 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV: where "num_sets" is the tlb_sizes[] value divided by the tlb_ways[] value. 
- The tsize field of mas1 shall be set to 4K on TLB0, even though the hardware ignores this value for TLB0. + +6.4 KVM_CAP_S390_CSS_SUPPORT + +Architectures: s390 +Parameters: none +Returns: 0 on success; -1 on error + +This capability enables support for handling of channel I/O instructions. + +TEST PENDING INTERRUPTION and the interrupt portion of TEST SUBCHANNEL are +handled in-kernel, while the other I/O instructions are passed to userspace. + +When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST +SUBCHANNEL intercepts. diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 29363d1..16bd5d1 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -262,6 +262,7 @@ struct kvm_arch{ debug_info_t *dbf; struct kvm_s390_float_interrupt float_int; struct gmap *gmap; + int css_support; }; extern int sie64a(struct kvm_s390_sie_block *, u64 *); diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 71af87d..f26ff1e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -264,6 +264,7 @@ static const intercept_handler_t intercept_funcs[] = { [0x0C >> 2] = handle_instruction_and_prog, [0x10 >> 2] = handle_noop, [0x14 >> 2] = handle_noop, + [0x18 >> 2] = handle_noop, [0x1C >> 2] = kvm_s390_handle_wait, [0x20 >> 2] = handle_validity, [0x28 >> 2] = handle_stop, diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index b3b4748..9a12835 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -709,6 +709,43 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) return 0; } +struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, + u64 cr6, u64 schid) +{ + struct kvm_s390_float_interrupt *fi; + struct kvm_s390_interrupt_info *inti, *iter; + + if ((!schid && !cr6) || (schid && cr6)) + return NULL; + mutex_lock(&kvm->lock); + fi = &kvm->arch.float_int; + spin_lock(&fi->lock); + inti = NULL; + list_for_each_entry(iter, &fi->list, list) { + if (!is_ioint(iter->type)) + continue; + if (cr6 && ((cr6 & iter->io.io_int_word) == 0)) + continue; + if (schid) { + if (((schid & 0x00000000ffff0000) >> 16) != + iter->io.subchannel_id) + continue; + if ((schid & 0x000000000000ffff) != + iter->io.subchannel_nr) + continue; + } + inti = iter; + break; + } + if (inti) + list_del_init(&inti->list); + if (list_empty(&fi->list)) + atomic_set(&fi->active, 0); + spin_unlock(&fi->lock); + mutex_unlock(&kvm->lock); + return inti; +} + int kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5ff2603..5b01f09 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -141,6 +141,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_SYNC_REGS: case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: + case KVM_CAP_S390_CSS_SUPPORT: r = 1; break; case KVM_CAP_NR_VCPUS: @@ -235,6 +236,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.gmap) goto out_nogmap; } + + kvm->arch.css_support = 0; + return 0; out_nogmap: debug_unregister(kvm->arch.dbf); @@ -658,6 +662,7 @@ rerun_vcpu: case KVM_EXIT_INTR: case KVM_EXIT_S390_RESET: case KVM_EXIT_S390_UCONTROL: + case KVM_EXIT_S390_TSCH: break; default: BUG(); @@ -818,6 +823,13 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return -EINVAL; switch (cap->cap) { + case KVM_CAP_S390_CSS_SUPPORT: + if (!vcpu->kvm->arch.css_support) { + vcpu->kvm->arch.css_support = 1; + 
trace_kvm_s390_enable_css(vcpu->kvm); + } + r = 0; + break; default: r = -EINVAL; break; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 211b340..3e05def 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -113,6 +113,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt *s390int); int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); +struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, + u64 cr6, u64 schid); /* implemented in priv.c */ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 8ad776f..0ef9894 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -127,15 +127,98 @@ static int handle_skey(struct kvm_vcpu *vcpu) return 0; } -static int handle_io_inst(struct kvm_vcpu *vcpu) +static int handle_tpi(struct kvm_vcpu *vcpu) { - VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); - /* condition code 3 */ + u64 addr; + struct kvm_s390_interrupt_info *inti; + int cc; + + addr = kvm_s390_get_base_disp_s(vcpu); + + inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); + if (inti) { + if (addr) { + /* + * Store the two-word I/O interruption code into the + * provided area. + */ + put_guest_u16(vcpu, addr, inti->io.subchannel_id); + put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr); + put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm); + } else { + /* + * Store the three-word I/O interruption code into + * the appropriate lowcore area. + */ + put_guest_u16(vcpu, 184, inti->io.subchannel_id); + put_guest_u16(vcpu, 186, inti->io.subchannel_nr); + put_guest_u32(vcpu, 188, inti->io.io_int_parm); + put_guest_u32(vcpu, 192, inti->io.io_int_word); + } + cc = 1; + } else + cc = 0; + kfree(inti); + /* Set condition code and we're done. */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); - vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; + vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; return 0; } +static int handle_tsch(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_interrupt_info *inti; + + inti = kvm_s390_get_io_int(vcpu->kvm, 0, + vcpu->run->s.regs.gprs[1]); + + /* + * Prepare exit to userspace. + * We indicate whether we dequeued a pending I/O interrupt + * so that userspace can re-inject it if the instruction gets + * a program check. While this may re-order the pending I/O + * interrupts, this is no problem since the priority is kept + * intact. + */ + vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; + vcpu->run->s390_tsch.dequeued = !!inti; + if (inti) { + vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; + vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; + vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; + vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; + } + vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; + kfree(inti); + return -EREMOTE; +} + +static int handle_io_inst(struct kvm_vcpu *vcpu) +{ + VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); + + if (vcpu->kvm->arch.css_support) { + /* + * Most I/O instructions will be handled by userspace. + * Exceptions are tpi and the interrupt portion of tsch. + */ + if (vcpu->arch.sie_block->ipa == 0xb236) + return handle_tpi(vcpu); + if (vcpu->arch.sie_block->ipa == 0xb235) + return handle_tsch(vcpu); + /* Handle in userspace. */ + return -EOPNOTSUPP; + } else { + /* + * Set condition code 3 to stop the guest from issueing channel + * I/O instructions. 
+ */ + vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); + vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; + return 0; + } +} + static int handle_stfl(struct kvm_vcpu *vcpu) { unsigned int facility_list; diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 95fbc1ab..13f30f5 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -204,6 +204,26 @@ TRACE_EVENT(kvm_s390_stop_request, ); +/* + * Trace point for enabling channel I/O instruction support. + */ +TRACE_EVENT(kvm_s390_enable_css, + TP_PROTO(void *kvm), + TP_ARGS(kvm), + + TP_STRUCT__entry( + __field(void *, kvm) + ), + + TP_fast_assign( + __entry->kvm = kvm; + ), + + TP_printk("enabling channel I/O support (kvm @ %p)\n", + __entry->kvm) + ); + + #endif /* _TRACE_KVMS390_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 7ef9e75..a23f47c 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -14,7 +14,7 @@ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \ - ERSN(S390_UCONTROL) + ERSN(S390_UCONTROL), ERSN(S390_TSCH) TRACE_EVENT(kvm_userspace_exit, TP_PROTO(__u32 reason, int errno), diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 80bb3b8..8bb0bf8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -168,6 +168,7 @@ struct kvm_pit_config { #define KVM_EXIT_PAPR_HCALL 19 #define KVM_EXIT_S390_UCONTROL 20 #define KVM_EXIT_WATCHDOG 21 +#define KVM_EXIT_S390_TSCH 22 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -285,6 +286,15 @@ struct kvm_run { __u64 ret; __u64 args[9]; } papr_hcall; + /* KVM_EXIT_S390_TSCH */ + struct { + __u16 subchannel_id; + __u16 subchannel_nr; + __u32 io_int_parm; + __u32 io_int_word; + __u32 ipb; + __u8 dequeued; + } s390_tsch; /* Fix the size of the union. */ char padding[256]; }; @@ -645,6 +655,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_IRQFD_RESAMPLE 82 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83 #define KVM_CAP_PPC_HTAB_FD 84 +#define KVM_CAP_S390_CSS_SUPPORT 85 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v0.10.2 From ee04e0cea8b170ae9c8542162091c716de36666b Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Tue, 25 Dec 2012 14:34:06 +0200 Subject: KVM: mmu: remove unused trace event trace_kvm_mmu_delay_free_pages() is no longer used. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index cd6e983..b8f6172 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -195,12 +195,6 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page, TP_ARGS(sp) ); -DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages, - TP_PROTO(struct kvm_mmu_page *sp), - - TP_ARGS(sp) -); - TRACE_EVENT( mark_mmio_spte, TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access), -- cgit v0.10.2 From 908e7d7999bcce70ac52e7f390a8f5cbc55948de Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Thu, 27 Dec 2012 14:44:58 +0200 Subject: KVM: MMU: simplify folding of dirty bit into accessed_dirty MMU code tries to avoid if()s HW is not able to predict reliably by using bitwise operation to streamline code execution, but in case of a dirty bit folding this gives us nothing since write_fault is checked right before the folding code. Lets just piggyback onto the if() to make code more clear. 
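For reference, the removed branchless form relied on write_fault being either 0 or PFERR_WRITE_MASK; annotated, it reads:

	shift = write_fault >> ilog2(PFERR_WRITE_MASK);	/* 0 on a read fault, 1 on a write fault */
	shift *= PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT;	/* 0, or the dirty-to-accessed bit distance */
	accessed_dirty &= pte >> shift;			/* write: fold the dirty bit into the accessed
							   position; read: a no-op re-AND */

Since write_fault is already tested just above (for protect_clean_gpte()), the patch simply moves the fold under an else branch instead.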
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 891eb6d..a7b24cf 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -249,16 +249,12 @@ retry_walk: if (!write_fault) protect_clean_gpte(&pte_access, pte); - - /* - * On a write fault, fold the dirty bit into accessed_dirty by shifting it one - * place right. - * - * On a read fault, do nothing. - */ - shift = write_fault >> ilog2(PFERR_WRITE_MASK); - shift *= PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT; - accessed_dirty &= pte >> shift; + else + /* + * On a write fault, fold the dirty bit into accessed_dirty by + * shifting it one place right. + */ + accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); -- cgit v0.10.2 From b0cfeb5dedf9109b8a8bd594abe60791b135a643 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Tue, 8 Jan 2013 10:49:00 +0200 Subject: KVM: x86: remove unused variable from walk_addr_generic() Fix compilation warning. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index a7b24cf..2ad76b9 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -151,7 +151,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, pt_element_t pte; pt_element_t __user *uninitialized_var(ptep_user); gfn_t table_gfn; - unsigned index, pt_access, pte_access, accessed_dirty, shift; + unsigned index, pt_access, pte_access, accessed_dirty; gpa_t pte_gpa; int offset; const int write_fault = access & PFERR_WRITE_MASK; -- cgit v0.10.2 From b09408d00fd82be80289a329dd94d1a0d6b77dc2 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 7 Jan 2013 19:27:06 -0200 Subject: KVM: VMX: fix incorrect cached cpl value with real/v8086 modes CPL is always 0 when in real mode, and always 3 when virtual 8086 mode. Using values other than those can cause failures on operations that check CPL. 
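Condensed and slightly simplified (the in-tree function additionally guards the EFLAGS.VM test with a mode check and caches the result via VCPU_EXREG_CPL), the CPL derivation after this change is:

	if (!is_protmode(vcpu))
		return 0;	/* real mode: CPL is always 0 */
	if (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)
		return 3;	/* virtual-8086 mode: CPL is always 3 */
	return vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;	/* protected mode: CS selector RPL */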
Reviewed-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 55dfc37..dd2a85c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1696,7 +1696,6 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); - __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); to_vmx(vcpu)->rflags = rflags; if (to_vmx(vcpu)->rmode.vm86_active) { to_vmx(vcpu)->rmode.save_rflags = rflags; @@ -3110,7 +3109,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; - __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); } static u64 construct_eptp(unsigned long root_hpa) @@ -3220,8 +3218,10 @@ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) return vmx_read_guest_seg_base(to_vmx(vcpu), seg); } -static int __vmx_get_cpl(struct kvm_vcpu *vcpu) +static int vmx_get_cpl(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!is_protmode(vcpu)) return 0; @@ -3229,13 +3229,6 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu) && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */ return 3; - return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3; -} - -static int vmx_get_cpl(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - /* * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations * fail; use the cache instead. @@ -3246,7 +3239,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu) if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); - vmx->cpl = __vmx_get_cpl(vcpu); + vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3; } return vmx->cpl; -- cgit v0.10.2 From e28bbd44dad134046ef9463cbb8c1cf81f53de5e Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:48 +0200 Subject: KVM: x86 emulator: framework for streamlining arithmetic opcodes We emulate arithmetic opcodes by executing a "similar" (same operation, different operands) on the cpu. This ensures accurate emulation, esp. wrt. eflags. However, the prologue and epilogue around the opcode is fairly long, consisting of a switch (for the operand size) and code to load and save the operands. This is repeated for every opcode. This patch introduces an alternative way to emulate arithmetic opcodes. Instead of the above, we have four (three on i386) functions consisting of just the opcode and a ret; one for each operand size. For example: .align 8 em_notb: not %al ret .align 8 em_notw: not %ax ret .align 8 em_notl: not %eax ret .align 8 em_notq: not %rax ret The prologue and epilogue are shared across all opcodes. Note the functions use a special calling convention; notably eflags is an input/output parameter and is not clobbered. Rather than dispatching the four functions through a jump table, the functions are declared as a constant size (8) so their address can be calculated. Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 53c5ad6..dd71567 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -149,6 +149,7 @@ #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. 
MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ +#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define X2(x...) x, x #define X3(x...) X2(x), x @@ -159,6 +160,27 @@ #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) +#define NR_FASTOP (ilog2(sizeof(ulong)) + 1) +#define FASTOP_SIZE 8 + +/* + * fastop functions have a special calling convention: + * + * dst: [rdx]:rax (in/out) + * src: rbx (in/out) + * src2: rcx (in) + * flags: rflags (in/out) + * + * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for + * different operand sizes can be reached by calculation, rather than a jump + * table (which would be bigger than the code). + * + * fastop functions are declared as taking a never-defined fastop parameter, + * so they can't be called from C directly. + */ + +struct fastop; + struct opcode { u64 flags : 56; u64 intercept : 8; @@ -168,6 +190,7 @@ struct opcode { const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; + void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; @@ -3646,6 +3669,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } +#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ @@ -4502,6 +4526,16 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); } +static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) +{ + ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; + fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; + asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" + : "+a"(ctxt->dst.val), "+b"(ctxt->src.val), [flags]"+D"(flags) + : "c"(ctxt->src2.val), [fastop]"S"(fop)); + ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); + return X86EMUL_CONTINUE; +} int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { @@ -4631,6 +4665,13 @@ special_insn: } if (ctxt->execute) { + if (ctxt->d & Fastop) { + void (*fop)(struct fastop *) = (void *)ctxt->execute; + rc = fastop(ctxt, fop); + if (rc != X86EMUL_CONTINUE) + goto done; + goto writeback; + } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; -- cgit v0.10.2 From b7d491e7f065b5b957a27a65c9e7cd3ef96b2736 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:49 +0200 Subject: KVM: x86 emulator: Support for declaring single operand fastops Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index dd71567..42c53c80 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -24,6 +24,7 @@ #include "kvm_cache_regs.h" #include #include +#include #include "x86.h" #include "tss.h" @@ -439,6 +440,30 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) } \ } while (0) +#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" +#define FOP_RET "ret \n\t" + +#define FOP_START(op) \ + extern void em_##op(struct fastop *fake); \ + asm(".pushsection .text, \"ax\" \n\t" \ + ".global em_" #op " \n\t" \ + FOP_ALIGN \ + "em_" #op ": \n\t" + +#define FOP_END \ + ".popsection") + +#define FOP1E(op, dst) \ + 
FOP_ALIGN #op " %" #dst " \n\t" FOP_RET + +#define FASTOP1(op) \ + FOP_START(op) \ + FOP1E(op##b, al) \ + FOP1E(op##w, ax) \ + FOP1E(op##l, eax) \ + ON64(FOP1E(op##q, rax)) \ + FOP_END + #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ do { \ unsigned long _tmp; \ -- cgit v0.10.2 From b6744dc3fb55fda7cfcb53beecfa056f02415e6d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:50 +0200 Subject: KVM: x86 emulator: introduce NoWrite flag Instead of disabling writeback via OP_NONE, just specify NoWrite. Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 42c53c80..fe113fb 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -151,6 +151,7 @@ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ +#define NoWrite ((u64)1 << 45) /* No writeback */ #define X2(x...) x, x #define X3(x...) X2(x), x @@ -1633,6 +1634,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt) { int rc; + if (ctxt->d & NoWrite) + return X86EMUL_CONTINUE; + switch (ctxt->dst.type) { case OP_REG: write_register_operand(&ctxt->dst); -- cgit v0.10.2 From 75f728456f368140e6b34b6a79f3408774076325 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:51 +0200 Subject: KVM: x86 emulator: mark CMP, CMPS, SCAS, TEST as NoWrite Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index fe113fb..2af0c44 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3069,16 +3069,12 @@ static int em_xor(struct x86_emulate_ctxt *ctxt) static int em_cmp(struct x86_emulate_ctxt *ctxt) { emulate_2op_SrcV(ctxt, "cmp"); - /* Disable writeback. */ - ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_test(struct x86_emulate_ctxt *ctxt) { emulate_2op_SrcV(ctxt, "test"); - /* Disable writeback. 
*/ - ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } @@ -3747,7 +3743,7 @@ static const struct opcode group1[] = { I(Lock | PageTable, em_and), I(Lock, em_sub), I(Lock, em_xor), - I(0, em_cmp), + I(NoWrite, em_cmp), }; static const struct opcode group1A[] = { @@ -3755,8 +3751,8 @@ static const struct opcode group1A[] = { }; static const struct opcode group3[] = { - I(DstMem | SrcImm, em_test), - I(DstMem | SrcImm, em_test), + I(DstMem | SrcImm | NoWrite, em_test), + I(DstMem | SrcImm | NoWrite, em_test), I(DstMem | SrcNone | Lock, em_not), I(DstMem | SrcNone | Lock, em_neg), I(SrcMem, em_mul_ex), @@ -3920,7 +3916,7 @@ static const struct opcode opcode_table[256] = { /* 0x30 - 0x37 */ I6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ - I6ALU(0, em_cmp), N, N, + I6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X16(D(DstReg)), /* 0x50 - 0x57 */ @@ -3946,7 +3942,7 @@ static const struct opcode opcode_table[256] = { G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), - I2bv(DstMem | SrcReg | ModRM, em_test), + I2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), @@ -3966,12 +3962,12 @@ static const struct opcode opcode_table[256] = { I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), - I2bv(SrcSI | DstDI | String, em_cmp), + I2bv(SrcSI | DstDI | String | NoWrite, em_cmp), /* 0xA8 - 0xAF */ - I2bv(DstAcc | SrcImm, em_test), + I2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), - I2bv(SrcAcc | DstDI | String, em_cmp), + I2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ -- cgit v0.10.2 From 45a1467d7edff741d97a8be28342440ee65aa03c Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:52 +0200 Subject: KVM: x86 emulator: convert NOT, NEG to fastop Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2af0c44..09dbdc5 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2050,17 +2050,8 @@ static int em_grp2(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_not(struct x86_emulate_ctxt *ctxt) -{ - ctxt->dst.val = ~ctxt->dst.val; - return X86EMUL_CONTINUE; -} - -static int em_neg(struct x86_emulate_ctxt *ctxt) -{ - emulate_1op(ctxt, "neg"); - return X86EMUL_CONTINUE; -} +FASTOP1(not); +FASTOP1(neg); static int em_mul_ex(struct x86_emulate_ctxt *ctxt) { @@ -3753,8 +3744,8 @@ static const struct opcode group1A[] = { static const struct opcode group3[] = { I(DstMem | SrcImm | NoWrite, em_test), I(DstMem | SrcImm | NoWrite, em_test), - I(DstMem | SrcNone | Lock, em_not), - I(DstMem | SrcNone | Lock, em_neg), + F(DstMem | SrcNone | Lock, em_not), + F(DstMem | SrcNone | Lock, em_neg), I(SrcMem, em_mul_ex), I(SrcMem, em_imul_ex), I(SrcMem, em_div_ex), -- cgit v0.10.2 From f7857f35dbf8e7ca36ebff3f43888fd3fb0f0e70 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:53 +0200 Subject: KVM: x86 emulator: add macros for defining 2-operand fastop emulation Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 09dbdc5..3b5d4dd 
100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -465,6 +465,17 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) ON64(FOP1E(op##q, rax)) \ FOP_END +#define FOP2E(op, dst, src) \ + FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET + +#define FASTOP2(op) \ + FOP_START(op) \ + FOP2E(op##b, al, bl) \ + FOP2E(op##w, ax, bx) \ + FOP2E(op##l, eax, ebx) \ + ON64(FOP2E(op##q, rax, rbx)) \ + FOP_END + #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ do { \ unsigned long _tmp; \ @@ -3696,6 +3707,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) +#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) -- cgit v0.10.2 From fb864fbc72fd4e2175fb64072fe9134d3a3ab89a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 4 Jan 2013 16:18:54 +0200 Subject: KVM: x86 emulator: convert basic ALU ops to fastop Opcodes: TEST CMP ADD ADC SUB SBB XOR OR AND Acked-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 3b5d4dd..619a33d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3026,59 +3026,15 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_add(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "add"); - return X86EMUL_CONTINUE; -} - -static int em_or(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "or"); - return X86EMUL_CONTINUE; -} - -static int em_adc(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "adc"); - return X86EMUL_CONTINUE; -} - -static int em_sbb(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "sbb"); - return X86EMUL_CONTINUE; -} - -static int em_and(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "and"); - return X86EMUL_CONTINUE; -} - -static int em_sub(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "sub"); - return X86EMUL_CONTINUE; -} - -static int em_xor(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "xor"); - return X86EMUL_CONTINUE; -} - -static int em_cmp(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "cmp"); - return X86EMUL_CONTINUE; -} - -static int em_test(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV(ctxt, "test"); - return X86EMUL_CONTINUE; -} +FASTOP2(add); +FASTOP2(or); +FASTOP2(adc); +FASTOP2(sbb); +FASTOP2(and); +FASTOP2(sub); +FASTOP2(xor); +FASTOP2(cmp); +FASTOP2(test); static int em_xchg(struct x86_emulate_ctxt *ctxt) { @@ -3711,9 +3667,9 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) -#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \ - I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ - I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) +#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ + F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ + F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), @@ -3739,14 +3695,14 @@ static const struct opcode group7_rm7[] = { }; static const struct opcode group1[] = { - I(Lock, em_add), - I(Lock | PageTable, em_or), - I(Lock, em_adc), - I(Lock, em_sbb), - I(Lock | PageTable, em_and), - I(Lock, em_sub), 
- I(Lock, em_xor), - I(NoWrite, em_cmp), + F(Lock, em_add), + F(Lock | PageTable, em_or), + F(Lock, em_adc), + F(Lock, em_sbb), + F(Lock | PageTable, em_and), + F(Lock, em_sub), + F(Lock, em_xor), + F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { @@ -3754,8 +3710,8 @@ static const struct opcode group1A[] = { }; static const struct opcode group3[] = { - I(DstMem | SrcImm | NoWrite, em_test), - I(DstMem | SrcImm | NoWrite, em_test), + F(DstMem | SrcImm | NoWrite, em_test), + F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), I(SrcMem, em_mul_ex), @@ -3897,29 +3853,29 @@ static const struct escape escape_dd = { { static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ - I6ALU(Lock, em_add), + F6ALU(Lock, em_add), I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), /* 0x08 - 0x0F */ - I6ALU(Lock | PageTable, em_or), + F6ALU(Lock | PageTable, em_or), I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), N, /* 0x10 - 0x17 */ - I6ALU(Lock, em_adc), + F6ALU(Lock, em_adc), I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), /* 0x18 - 0x1F */ - I6ALU(Lock, em_sbb), + F6ALU(Lock, em_sbb), I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), /* 0x20 - 0x27 */ - I6ALU(Lock | PageTable, em_and), N, N, + F6ALU(Lock | PageTable, em_and), N, N, /* 0x28 - 0x2F */ - I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), + F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), /* 0x30 - 0x37 */ - I6ALU(Lock, em_xor), N, N, + F6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ - I6ALU(NoWrite, em_cmp), N, N, + F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X16(D(DstReg)), /* 0x50 - 0x57 */ @@ -3945,7 +3901,7 @@ static const struct opcode opcode_table[256] = { G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), - I2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), + F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), @@ -3965,12 +3921,12 @@ static const struct opcode opcode_table[256] = { I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), - I2bv(SrcSI | DstDI | String | NoWrite, em_cmp), + F2bv(SrcSI | DstDI | String | NoWrite, em_cmp), /* 0xA8 - 0xAF */ - I2bv(DstAcc | SrcImm | NoWrite, em_test), + F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), - I2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), + F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ -- cgit v0.10.2 From 73fa21ea4fc662a2e8e85f84c4ca3fcb55fa4da2 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Mon, 7 Jan 2013 15:51:51 +0100 Subject: KVM: s390: Dynamic allocation of virtio-ccw I/O data. Dynamically allocate any data structures like ccw used when doing channel I/O. Otherwise, we'd need to add extra serialization for the different callbacks using the same data structures. 
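To summarize the pattern this patch converges on (a sketch reusing identifiers from the driver, not a replacement for any of its functions): each operation now allocates its own short-lived ccw1, runs the channel program, and frees it again:

	static int example_vq_conf_read(struct virtio_ccw_device *vcdev)
	{
		struct ccw1 *ccw;
		int ret;

		/* One channel command word per call instead of a shared one. */
		ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
		if (!ccw)
			return -ENOMEM;

		ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
		ccw->flags = 0;
		ccw->count = sizeof(struct vq_config_block);
		ccw->cda = (__u32)(unsigned long)vcdev->config_block;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);

		kfree(ccw);
		return ret;
	}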
Reported-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c index 1a5aff3..70419a7 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/kvm/virtio_ccw.c @@ -46,11 +46,9 @@ struct vq_config_block { struct virtio_ccw_device { struct virtio_device vdev; - __u8 status; + __u8 *status; __u8 config[VIRTIO_CCW_CONFIG_SIZE]; struct ccw_device *cdev; - struct ccw1 *ccw; - __u32 area; __u32 curr_io; int err; wait_queue_head_t wait_q; @@ -127,14 +125,15 @@ static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag) return ret; } -static int ccw_io_helper(struct virtio_ccw_device *vcdev, __u32 intparm) +static int ccw_io_helper(struct virtio_ccw_device *vcdev, + struct ccw1 *ccw, __u32 intparm) { int ret; unsigned long flags; int flag = intparm & VIRTIO_CCW_INTPARM_MASK; spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); - ret = ccw_device_start(vcdev->cdev, vcdev->ccw, intparm, 0, 0); + ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); if (!ret) vcdev->curr_io |= flag; spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); @@ -167,18 +166,19 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq) do_kvm_notify(schid, virtqueue_get_queue_index(vq)); } -static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, int index) +static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, + struct ccw1 *ccw, int index) { vcdev->config_block->index = index; - vcdev->ccw->cmd_code = CCW_CMD_READ_VQ_CONF; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(struct vq_config_block); - vcdev->ccw->cda = (__u32)(unsigned long)(vcdev->config_block); - ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_VQ_CONF); + ccw->cmd_code = CCW_CMD_READ_VQ_CONF; + ccw->flags = 0; + ccw->count = sizeof(struct vq_config_block); + ccw->cda = (__u32)(unsigned long)(vcdev->config_block); + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); return vcdev->config_block->num; } -static void virtio_ccw_del_vq(struct virtqueue *vq) +static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) { struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); struct virtio_ccw_vq_info *info = vq->priv; @@ -197,11 +197,12 @@ static void virtio_ccw_del_vq(struct virtqueue *vq) info->info_block->align = 0; info->info_block->index = index; info->info_block->num = 0; - vcdev->ccw->cmd_code = CCW_CMD_SET_VQ; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(*info->info_block); - vcdev->ccw->cda = (__u32)(unsigned long)(info->info_block); - ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_VQ | index); + ccw->cmd_code = CCW_CMD_SET_VQ; + ccw->flags = 0; + ccw->count = sizeof(*info->info_block); + ccw->cda = (__u32)(unsigned long)(info->info_block); + ret = ccw_io_helper(vcdev, ccw, + VIRTIO_CCW_DOING_SET_VQ | index); /* * -ENODEV isn't considered an error: The device is gone anyway. * This may happen on device detach. 
@@ -220,14 +221,23 @@ static void virtio_ccw_del_vq(struct virtqueue *vq) static void virtio_ccw_del_vqs(struct virtio_device *vdev) { struct virtqueue *vq, *n; + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + list_for_each_entry_safe(vq, n, &vdev->vqs, list) - virtio_ccw_del_vq(vq); + virtio_ccw_del_vq(vq, ccw); + + kfree(ccw); } static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, int i, vq_callback_t *callback, - const char *name) + const char *name, + struct ccw1 *ccw) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); int err; @@ -250,7 +260,7 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, err = -ENOMEM; goto out_err; } - info->num = virtio_ccw_read_vq_conf(vcdev, i); + info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i); size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (info->queue == NULL) { @@ -277,11 +287,11 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN; info->info_block->index = i; info->info_block->num = info->num; - vcdev->ccw->cmd_code = CCW_CMD_SET_VQ; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(*info->info_block); - vcdev->ccw->cda = (__u32)(unsigned long)(info->info_block); - err = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_VQ | i); + ccw->cmd_code = CCW_CMD_SET_VQ; + ccw->flags = 0; + ccw->count = sizeof(*info->info_block); + ccw->cda = (__u32)(unsigned long)(info->info_block); + err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i); if (err) { dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); free_pages_exact(info->queue, size); @@ -312,9 +322,15 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; int ret, i; + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return -ENOMEM; for (i = 0; i < nvqs; ++i) { - vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], + ccw); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; @@ -329,28 +345,30 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, *indicatorp = (unsigned long) &vcdev->indicators; /* Register queue indicators with host. 
*/ vcdev->indicators = 0; - vcdev->ccw->cmd_code = CCW_CMD_SET_IND; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(vcdev->indicators); - vcdev->ccw->cda = (__u32)(unsigned long) indicatorp; - ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_IND); + ccw->cmd_code = CCW_CMD_SET_IND; + ccw->flags = 0; + ccw->count = sizeof(vcdev->indicators); + ccw->cda = (__u32)(unsigned long) indicatorp; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND); if (ret) goto out; /* Register indicators2 with host for config changes */ *indicatorp = (unsigned long) &vcdev->indicators2; vcdev->indicators2 = 0; - vcdev->ccw->cmd_code = CCW_CMD_SET_CONF_IND; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(vcdev->indicators2); - vcdev->ccw->cda = (__u32)(unsigned long) indicatorp; - ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_CONF_IND); + ccw->cmd_code = CCW_CMD_SET_CONF_IND; + ccw->flags = 0; + ccw->count = sizeof(vcdev->indicators2); + ccw->cda = (__u32)(unsigned long) indicatorp; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND); if (ret) goto out; kfree(indicatorp); + kfree(ccw); return 0; out: kfree(indicatorp); + kfree(ccw); virtio_ccw_del_vqs(vdev); return ret; } @@ -358,64 +376,95 @@ out: static void virtio_ccw_reset(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; /* Zero status bits. */ - vcdev->status = 0; + *vcdev->status = 0; /* Send a reset ccw on device. */ - vcdev->ccw->cmd_code = CCW_CMD_VDEV_RESET; - vcdev->ccw->flags = 0; - vcdev->ccw->count = 0; - vcdev->ccw->cda = 0; - ccw_io_helper(vcdev, VIRTIO_CCW_DOING_RESET); + ccw->cmd_code = CCW_CMD_VDEV_RESET; + ccw->flags = 0; + ccw->count = 0; + ccw->cda = 0; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET); + kfree(ccw); } static u32 virtio_ccw_get_features(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); - struct virtio_feature_desc features; - int ret; + struct virtio_feature_desc *features; + int ret, rc; + struct ccw1 *ccw; + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return 0; + + features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); + if (!features) { + rc = 0; + goto out_free; + } /* Read the feature bits from the host. 
*/ /* TODO: Features > 32 bits */ - features.index = 0; - vcdev->ccw->cmd_code = CCW_CMD_READ_FEAT; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(features); - vcdev->ccw->cda = vcdev->area; - ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_FEAT); - if (ret) - return 0; + features->index = 0; + ccw->cmd_code = CCW_CMD_READ_FEAT; + ccw->flags = 0; + ccw->count = sizeof(*features); + ccw->cda = (__u32)(unsigned long)features; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT); + if (ret) { + rc = 0; + goto out_free; + } + + rc = le32_to_cpu(features->features); - memcpy(&features, (void *)(unsigned long)vcdev->area, - sizeof(features)); - return le32_to_cpu(features.features); +out_free: + kfree(features); + kfree(ccw); + return rc; } static void virtio_ccw_finalize_features(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); - struct virtio_feature_desc features; + struct virtio_feature_desc *features; int i; + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); + if (!features) + goto out_free; /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - for (i = 0; i < sizeof(*vdev->features) / sizeof(features.features); + for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features); i++) { int highbits = i % 2 ? 32 : 0; - features.index = i; - features.features = cpu_to_le32(vdev->features[i / 2] - >> highbits); - memcpy((void *)(unsigned long)vcdev->area, &features, - sizeof(features)); + features->index = i; + features->features = cpu_to_le32(vdev->features[i / 2] + >> highbits); /* Write the feature bits to the host. */ - vcdev->ccw->cmd_code = CCW_CMD_WRITE_FEAT; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(features); - vcdev->ccw->cda = vcdev->area; - ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_FEAT); + ccw->cmd_code = CCW_CMD_WRITE_FEAT; + ccw->flags = 0; + ccw->count = sizeof(*features); + ccw->cda = (__u32)(unsigned long)features; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); } +out_free: + kfree(features); + kfree(ccw); } static void virtio_ccw_get_config(struct virtio_device *vdev, @@ -423,19 +472,32 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, { struct virtio_ccw_device *vcdev = to_vc_device(vdev); int ret; + struct ccw1 *ccw; + void *config_area; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); + if (!config_area) + goto out_free; /* Read the config area from the host. 
*/ - vcdev->ccw->cmd_code = CCW_CMD_READ_CONF; - vcdev->ccw->flags = 0; - vcdev->ccw->count = offset + len; - vcdev->ccw->cda = vcdev->area; - ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_CONFIG); + ccw->cmd_code = CCW_CMD_READ_CONF; + ccw->flags = 0; + ccw->count = offset + len; + ccw->cda = (__u32)(unsigned long)config_area; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG); if (ret) - return; + goto out_free; - memcpy(vcdev->config, (void *)(unsigned long)vcdev->area, - sizeof(vcdev->config)); + memcpy(vcdev->config, config_area, sizeof(vcdev->config)); memcpy(buf, &vcdev->config[offset], len); + +out_free: + kfree(config_area); + kfree(ccw); } static void virtio_ccw_set_config(struct virtio_device *vdev, @@ -443,37 +505,55 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, unsigned len) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct ccw1 *ccw; + void *config_area; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); + if (!config_area) + goto out_free; memcpy(&vcdev->config[offset], buf, len); /* Write the config area to the host. */ - memcpy((void *)(unsigned long)vcdev->area, vcdev->config, - sizeof(vcdev->config)); - vcdev->ccw->cmd_code = CCW_CMD_WRITE_CONF; - vcdev->ccw->flags = 0; - vcdev->ccw->count = offset + len; - vcdev->ccw->cda = vcdev->area; - ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_CONFIG); + memcpy(config_area, vcdev->config, sizeof(vcdev->config)); + ccw->cmd_code = CCW_CMD_WRITE_CONF; + ccw->flags = 0; + ccw->count = offset + len; + ccw->cda = (__u32)(unsigned long)config_area; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG); + +out_free: + kfree(config_area); + kfree(ccw); } static u8 virtio_ccw_get_status(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); - return vcdev->status; + return *vcdev->status; } static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; /* Write the status to the host. */ - vcdev->status = status; - memcpy((void *)(unsigned long)vcdev->area, &status, sizeof(status)); - vcdev->ccw->cmd_code = CCW_CMD_WRITE_STATUS; - vcdev->ccw->flags = 0; - vcdev->ccw->count = sizeof(status); - vcdev->ccw->cda = vcdev->area; - ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_STATUS); + *vcdev->status = status; + ccw->cmd_code = CCW_CMD_WRITE_STATUS; + ccw->flags = 0; + ccw->count = sizeof(status); + ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS); + kfree(ccw); } static struct virtio_config_ops virtio_ccw_config_ops = { @@ -499,9 +579,8 @@ static void virtio_ccw_release_dev(struct device *_d) dev); struct virtio_ccw_device *vcdev = to_vc_device(dev); - kfree((void *)(unsigned long)vcdev->area); + kfree(vcdev->status); kfree(vcdev->config_block); - kfree(vcdev->ccw); kfree(vcdev); } @@ -657,9 +736,6 @@ static int virtio_ccw_offline(struct ccw_device *cdev) } -/* Area needs to be big enough to fit status, features or configuration. 
*/ -#define VIRTIO_AREA_SIZE VIRTIO_CCW_CONFIG_SIZE /* biggest possible use */ - static int virtio_ccw_online(struct ccw_device *cdev) { int ret; @@ -671,21 +747,14 @@ static int virtio_ccw_online(struct ccw_device *cdev) ret = -ENOMEM; goto out_free; } - vcdev->area = (__u32)(unsigned long)kzalloc(VIRTIO_AREA_SIZE, - GFP_DMA | GFP_KERNEL); - if (!vcdev->area) { - dev_warn(&cdev->dev, "Cound not get memory for virtio\n"); - ret = -ENOMEM; - goto out_free; - } vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), GFP_DMA | GFP_KERNEL); if (!vcdev->config_block) { ret = -ENOMEM; goto out_free; } - vcdev->ccw = kzalloc(sizeof(*vcdev->ccw), GFP_DMA | GFP_KERNEL); - if (!vcdev->ccw) { + vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL); + if (!vcdev->status) { ret = -ENOMEM; goto out_free; } @@ -714,9 +783,8 @@ out_put: return ret; out_free: if (vcdev) { - kfree((void *)(unsigned long)vcdev->area); + kfree(vcdev->status); kfree(vcdev->config_block); - kfree(vcdev->ccw); } kfree(vcdev); return ret; -- cgit v0.10.2 From b26ba22bb4f12289f9d5eb878c490e674934a197 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 7 Jan 2013 15:51:52 +0100 Subject: KVM: s390: Gracefully handle busy conditions on ccw_device_start In rare cases a virtio command might try to issue a ccw before a former ccw was answered with a tsch. This will cause CC=2 (busy). Lets just retry in that case. Signed-off-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c index 70419a7..2edd94a 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/kvm/virtio_ccw.c @@ -132,11 +132,14 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, unsigned long flags; int flag = intparm & VIRTIO_CCW_INTPARM_MASK; - spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); - ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); - if (!ret) - vcdev->curr_io |= flag; - spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); + do { + spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); + ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); + if (!ret) + vcdev->curr_io |= flag; + spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); + cpu_relax(); + } while (ret == -EBUSY); wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); return ret ? ret : vcdev->err; } -- cgit v0.10.2 From 68e2ffed358c92341c126e606b500098fc72f5f7 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Tue, 11 Dec 2012 03:38:23 +0000 Subject: KVM: PPC: Fix SREGS documentation reference Reflect the uapi folder change in SREGS API documentation. Signed-off-by: Mihai Caraman Reviewed-by: Amos Kong Signed-off-by: Alexander Graf diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index f2d6391..4fc2bfc 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -345,7 +345,7 @@ struct kvm_sregs { __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; }; -/* ppc -- see arch/powerpc/include/asm/kvm.h */ +/* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */ interrupt_bitmap is a bitmap of pending external interrupts. At most one bit may be set. 
This interrupt has been acknowledged by the APIC -- cgit v0.10.2 From 5a33169ed29060df71627103e6968078b42de945 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 14 Dec 2012 23:46:03 +0100 Subject: KVM: PPC: Only WARN on invalid emulation When we hit an emulation result that we didn't expect, that is an error, but it's nothing that warrants a BUG(), because it can be guest triggered. So instead, let's only WARN() the user that this happened. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index be83fca..e2225e5 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -237,7 +237,8 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) r = RESUME_HOST; break; default: - BUG(); + WARN_ON(1); + r = RESUME_GUEST; } return r; -- cgit v0.10.2 From 50c7bb80b5bd5a9962905306dd2292eeb9857d46 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 14 Dec 2012 23:42:05 +0100 Subject: KVM: PPC: Book3S: PR: Enable alternative instruction for SC 1 When running on top of pHyp, the hypercall instruction "sc 1" goes straight into pHyp without trapping in supervisor mode. So if we want to support PAPR guest in this configuration we need to add a second way of accessing PAPR hypercalls, preferably with the exact same semantics except for the instruction. So let's overlay an officially reserved instruction and emulate PAPR hypercalls whenever we hit that one. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 572aa75..5f5f69a 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -44,6 +44,7 @@ enum emulation_result { EMULATE_DO_DCR, /* kvm_run filled with DCR request */ EMULATE_FAIL, /* can't emulate this instruction */ EMULATE_AGAIN, /* something went wrong. 
go again */ + EMULATE_DO_PAPR, /* kvm_run filled with PAPR request */ }; extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index d31a716..c88161b 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -34,6 +34,8 @@ #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 #define OP_31_XOP_TLBIE 306 +/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */ +#define OP_31_XOP_FAKE_SC1 308 #define OP_31_XOP_SLBMTE 402 #define OP_31_XOP_SLBIE 434 #define OP_31_XOP_SLBIA 498 @@ -170,6 +172,32 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.mmu.tlbie(vcpu, addr, large); break; } +#ifdef CONFIG_KVM_BOOK3S_64_PR + case OP_31_XOP_FAKE_SC1: + { + /* SC 1 papr hypercalls */ + ulong cmd = kvmppc_get_gpr(vcpu, 3); + int i; + + if ((vcpu->arch.shared->msr & MSR_PR) || + !vcpu->arch.papr_enabled) { + emulated = EMULATE_FAIL; + break; + } + + if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) + break; + + run->papr_hcall.nr = cmd; + for (i = 0; i < 9; ++i) { + ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); + run->papr_hcall.args[i] = gpr; + } + + emulated = EMULATE_DO_PAPR; + break; + } +#endif case OP_31_XOP_EIOIO: break; case OP_31_XOP_SLBMTE: diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 28d38ad..73ed11c 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -760,6 +760,11 @@ program_interrupt: run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST_NV; break; + case EMULATE_DO_PAPR: + run->exit_reason = KVM_EXIT_PAPR_HCALL; + vcpu->arch.hcall_needed = 1; + r = RESUME_HOST_NV; + break; default: BUG(); } -- cgit v0.10.2 From f2be655004ddc36f2c5fc5e541d481dcd782ab83 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Thu, 20 Dec 2012 04:52:39 +0000 Subject: KVM: PPC: Fix mfspr/mtspr MMUCFG emulation On mfspr/mtspr emulation path Book3E's MMUCFG SPR with value 1015 clashes with G4's MSSSR0 SPR. Move MSSSR0 emulation from generic part to Books3S. MSSSR0 also clashes with Book3S's DABRX SPR. DABRX was not explicitly handled so Book3S execution flow will behave as before. 
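The clash exists because SPR emulation dispatches on the raw SPR number, and the same numeric value names different registers on different cores. A condensed sketch of the constraint (the function name, the config guard and the zero return values are illustrative; EMULATE_DONE/EMULATE_FAIL are the emulator's existing result codes):

        static int example_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
                                         ulong *spr_val)
        {
                switch (sprn) {
        #ifdef CONFIG_BOOKE
                case SPRN_MMUCFG:       /* Book3E reading of this SPR number */
                        *spr_val = 0;   /* placeholder for the real MMUCFG value */
                        break;
        #else
                case SPRN_MSSSR0:       /* Book3S/G4 reading of the same number */
                        *spr_val = 0;
                        break;
        #endif
                default:
                        return EMULATE_FAIL;
                }
                return EMULATE_DONE;
        }

Keeping both cases in one generic switch would either mis-handle one family or, where the two SPRN_* macros expand to the same value, not compile at all, which is why the MSSSR0 handling moves into the Book3S-specific emulation.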
Signed-off-by: Mihai Caraman Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index c88161b..836c569 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -455,6 +455,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: + case SPRN_MSSSR0: break; unprivileged: default: @@ -551,6 +552,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: + case SPRN_MSSSR0: *spr_val = 0; break; default: diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index b0855e5..71abcf4 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -149,8 +149,6 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_TBWL: break; case SPRN_TBWU: break; - case SPRN_MSSSR0: break; - case SPRN_DEC: vcpu->arch.dec = spr_val; kvmppc_emulate_dec(vcpu); @@ -201,9 +199,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) case SPRN_PIR: spr_val = vcpu->vcpu_id; break; - case SPRN_MSSSR0: - spr_val = 0; - break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. -- cgit v0.10.2 From b8c649a99d582a6d8afd8457ba6145c624b8a76f Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 20 Dec 2012 04:52:39 +0000 Subject: KVM: PPC: BookE: Allow irq deliveries to inject requests When injecting an interrupt into guest context, we usually don't need to check for requests anymore. At least not until today. With the introduction of EPR, we will have to create a request when the guest has successfully accepted an external interrupt though. So we need to prepare the interrupt delivery to abort guest entry gracefully. Otherwise we'd delay the EPR request. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 69f1140..964f447 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -581,6 +581,11 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) kvmppc_core_check_exceptions(vcpu); + if (vcpu->requests) { + /* Exception delivery raised request; start over */ + return 1; + } + if (vcpu->arch.shared->msr & MSR_WE) { local_irq_enable(); kvm_vcpu_block(vcpu); -- cgit v0.10.2 From 37ecb257f68ce4fb7c7048a1123bbcbbe36d9575 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 4 Jan 2013 18:02:14 +0100 Subject: KVM: PPC: BookE: Emulate mfspr on EPR The EPR register is potentially valid for PR KVM as well, so we need to emulate accesses to it. It's only defined for reading, so only handle the mfspr case. 
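For context, the guest-side access being emulated is simply a privileged read of the vector number, roughly (a guest-code sketch; the helper name is illustrative):

        /* Guest side: on an external interrupt, fetch the acknowledged vector. */
        static inline unsigned int example_guest_read_epr(void)
        {
                return mfspr(SPRN_EPR);
        }

Because the register is read-only from the guest's point of view, only the mfspr path needs a new case in kvmppc_booke_emulate_mfspr(), as the hunk below shows.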
Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 4685b8c..27a4b28 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -269,6 +269,9 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_ESR: *spr_val = vcpu->arch.shared->esr; break; + case SPRN_EPR: + *spr_val = vcpu->arch.epr; + break; case SPRN_CSRR0: *spr_val = vcpu->arch.csrr0; break; -- cgit v0.10.2 From 1c810636556c8d53a37406b34a64d9b9b0161aa6 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 4 Jan 2013 18:12:48 +0100 Subject: KVM: PPC: BookE: Implement EPR exit The External Proxy Facility in FSL BookE chips allows the interrupt controller to automatically acknowledge an interrupt as soon as a core gets its pending external interrupt delivered. Today, user space implements the interrupt controller, so we need to check on it during such a cycle. This patch implements logic for user space to enable EPR exiting, disable EPR exiting and EPR exiting itself, so that user space can acknowledge an interrupt when an external interrupt has successfully been delivered into the guest vcpu. Signed-off-by: Alexander Graf diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 4fc2bfc..a98ed09 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2246,8 +2246,8 @@ executed a memory-mapped I/O instruction which could not be satisfied by kvm. The 'data' member contains the written data if 'is_write' is true, and should be filled by application code otherwise. -NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR - and KVM_EXIT_PAPR the corresponding +NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR, + KVM_EXIT_PAPR and KVM_EXIT_EPR the corresponding operations are complete (and guest state is consistent) only after userspace has re-entered the kernel with KVM_RUN. The kernel side will first finish incomplete operations and then check for pending signals. Userspace @@ -2366,6 +2366,25 @@ interrupt for the target subchannel has been dequeued and subchannel_id, subchannel_nr, io_int_parm and io_int_word contain the parameters for that interrupt. ipb is needed for instruction parameter decoding. + /* KVM_EXIT_EPR */ + struct { + __u32 epr; + } epr; + +On FSL BookE PowerPC chips, the interrupt controller has a fast patch +interrupt acknowledge path to the core. When the core successfully +delivers an interrupt, it automatically populates the EPR register with +the interrupt vector number and acknowledges the interrupt inside +the interrupt controller. + +In case the interrupt controller lives in user space, we need to do +the interrupt acknowledge cycle through it to fetch the next to be +delivered interrupt vector using this exit. + +It gets triggered whenever both KVM_CAP_PPC_EPR are enabled and an +external interrupt has just been delivered into the guest. User space +should put the acknowledged interrupt vector into the 'epr' field. + /* Fix the size of the union. */ char padding[256]; }; @@ -2501,3 +2520,20 @@ handled in-kernel, while the other I/O instructions are passed to userspace. When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST SUBCHANNEL intercepts. 
+ +6.5 KVM_CAP_PPC_EPR + +Architectures: ppc +Parameters: args[0] defines whether the proxy facility is active +Returns: 0 on success; -1 on error + +This capability enables or disables the delivery of interrupts through the +external proxy facility. + +When enabled (args[0] != 0), every time the guest gets an external interrupt +delivered, it automatically exits into user space with a KVM_EXIT_EPR exit +to receive the topmost interrupt vector. + +When disabled (args[0] == 0), behavior is as if this facility is unsupported. + +When this capability is enabled, KVM_EXIT_EPR can occur. diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index ab49c6c..8a72d59 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -520,6 +520,8 @@ struct kvm_vcpu_arch { u8 sane; u8 cpu_type; u8 hcall_needed; + u8 epr_enabled; + u8 epr_needed; u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 5f5f69a..493630e 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -264,6 +264,15 @@ static inline void kvm_linear_init(void) {} #endif +static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr) +{ +#ifdef CONFIG_KVM_BOOKE_HV + mtspr(SPRN_GEPR, epr); +#elif defined(CONFIG_BOOKE) + vcpu->arch.epr = epr; +#endif +} + int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, struct kvm_config_tlb *cfg); int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 964f447..940ec80 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -306,7 +306,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, { int allowed = 0; ulong msr_mask = 0; - bool update_esr = false, update_dear = false; + bool update_esr = false, update_dear = false, update_epr = false; ulong crit_raw = vcpu->arch.shared->critical; ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); bool crit; @@ -330,6 +330,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, keep_irq = true; } + if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled) + update_epr = true; + switch (priority) { case BOOKE_IRQPRIO_DTLB_MISS: case BOOKE_IRQPRIO_DATA_STORAGE: @@ -408,6 +411,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, set_guest_esr(vcpu, vcpu->arch.queued_esr); if (update_dear == true) set_guest_dear(vcpu, vcpu->arch.queued_dear); + if (update_epr == true) + kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); new_msr &= msr_mask; #if defined(CONFIG_64BIT) @@ -615,6 +620,13 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) r = 0; } + if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) { + vcpu->run->epr.epr = 0; + vcpu->arch.epr_needed = true; + vcpu->run->exit_reason = KVM_EXIT_EPR; + r = 0; + } + return r; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index e2225e5..934413c 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -306,6 +306,7 @@ int kvm_dev_ioctl_check_extension(long ext) #ifdef CONFIG_BOOKE case KVM_CAP_PPC_BOOKE_SREGS: case KVM_CAP_PPC_BOOKE_WATCHDOG: + case KVM_CAP_PPC_EPR: #else case KVM_CAP_PPC_SEGSTATE: case KVM_CAP_PPC_HIOR: @@ -721,6 +722,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) for (i = 0; i < 9; ++i) kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); vcpu->arch.hcall_needed = 0; +#ifdef CONFIG_BOOKE + } else if (vcpu->arch.epr_needed) { 
+ kvmppc_set_epr(vcpu, run->epr.epr); + vcpu->arch.epr_needed = 0; +#endif } r = kvmppc_vcpu_run(run, vcpu); @@ -762,6 +768,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = 0; vcpu->arch.papr_enabled = true; break; + case KVM_CAP_PPC_EPR: + r = 0; + vcpu->arch.epr_enabled = cap->args[0]; + break; #ifdef CONFIG_BOOKE case KVM_CAP_PPC_BOOKE_WATCHDOG: r = 0; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cbe0d68..4dd7d75 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -122,6 +122,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_WATCHDOG 18 #define KVM_REQ_MASTERCLOCK_UPDATE 19 #define KVM_REQ_MCLOCK_INPROGRESS 20 +#define KVM_REQ_EPR_EXIT 21 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 8bb0bf8..9a2db57 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -169,6 +169,7 @@ struct kvm_pit_config { #define KVM_EXIT_S390_UCONTROL 20 #define KVM_EXIT_WATCHDOG 21 #define KVM_EXIT_S390_TSCH 22 +#define KVM_EXIT_EPR 23 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -295,6 +296,10 @@ struct kvm_run { __u32 ipb; __u8 dequeued; } s390_tsch; + /* KVM_EXIT_EPR */ + struct { + __u32 epr; + } epr; /* Fix the size of the union. */ char padding[256]; }; @@ -656,6 +661,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_PPC_BOOKE_WATCHDOG 83 #define KVM_CAP_PPC_HTAB_FD 84 #define KVM_CAP_S390_CSS_SUPPORT 85 +#define KVM_CAP_PPC_EPR 86 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v0.10.2 From 324b3e63167bce69e6622c2be182595790bf7e38 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 4 Jan 2013 18:28:51 +0100 Subject: KVM: PPC: BookE: Add EPR ONE_REG sync We need to be able to read and write the contents of the EPR register from user space. This patch implements that logic through the ONE_REG API and declares its (never implemented) SREGS counterpart as deprecated. Signed-off-by: Alexander Graf diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index a98ed09..09905cb 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1774,6 +1774,7 @@ registers, find a list below: PPC | KVM_REG_PPC_VPA_SLB | 128 PPC | KVM_REG_PPC_VPA_DTL | 128 PPC | KVM_REG_PPC_EPCR | 32 + PPC | KVM_REG_PPC_EPR | 32 4.69 KVM_GET_ONE_REG diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 2fba8a6..16064d0 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -114,7 +114,10 @@ struct kvm_regs { /* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */ #define KVM_SREGS_E_SPE (1 << 9) -/* External Proxy (EXP) -- EPR */ +/* + * DEPRECATED! USE ONE_REG FOR THIS ONE! 
+ * External Proxy (EXP) -- EPR + */ #define KVM_SREGS_EXP (1 << 10) /* External PID (E.PD) -- EPSC/EPLC */ @@ -412,5 +415,6 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_VPA_DTL (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84) #define KVM_REG_PPC_EPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85) +#define KVM_REG_PPC_EPR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86) #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 940ec80..8779cd4c 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -300,6 +300,15 @@ static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) #endif } +static unsigned long get_guest_epr(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_KVM_BOOKE_HV + return mfspr(SPRN_GEPR); +#else + return vcpu->arch.epr; +#endif +} + /* Deliver the interrupt of the corresponding priority, if possible. */ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) @@ -1405,6 +1414,11 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) &vcpu->arch.dbg_reg.dac[dac], sizeof(u64)); break; } + case KVM_REG_PPC_EPR: { + u32 epr = get_guest_epr(vcpu); + r = put_user(epr, (u32 __user *)(long)reg->addr); + break; + } #if defined(CONFIG_64BIT) case KVM_REG_PPC_EPCR: r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr); @@ -1437,6 +1451,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) (u64 __user *)(long)reg->addr, sizeof(u64)); break; } + case KVM_REG_PPC_EPR: { + u32 new_epr; + r = get_user(new_epr, (u32 __user *)(long)reg->addr); + if (!r) + kvmppc_set_epr(vcpu, new_epr); + break; + } #if defined(CONFIG_64BIT) case KVM_REG_PPC_EPCR: { u32 new_epcr; -- cgit v0.10.2 From c22885050e651c2f5d2a1706cdc2eb38698db968 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 8 Jan 2013 14:36:04 +0800 Subject: KVM: MMU: fix Dirty bit missed if CR0.WP = 0 If the write-fault access is from supervisor and CR0.WP is not set on the vcpu, kvm will fix it by adjusting pte access - it sets the W bit on pte and clears U bit. This is the chance that kvm can change pte access from readonly to writable Unfortunately, the pte access is the access of 'direct' shadow page table, means direct sp.role.access = pte_access, then we will create a writable spte entry on the readonly shadow page table. It will cause Dirty bit is not tracked when two guest ptes point to the same large page. Note, it does not have other impact except Dirty bit since cr0.wp is encoded into sp.role It can be fixed by adjusting pte access before establishing shadow page table. 
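A condensed sketch of that adjustment (it mirrors the hunk added to paging_tmpl.h below; the wrapper function is illustrative, while the ACC_* masks and is_write_protection() are the existing MMU helpers):

        static unsigned example_adjust_pte_access(struct kvm_vcpu *vcpu,
                                                  unsigned pte_access,
                                                  bool write_fault, bool user_fault)
        {
                if (write_fault && !(pte_access & ACC_WRITE_MASK) &&
                    !is_write_protection(vcpu) && !user_fault) {
                        pte_access |= ACC_WRITE_MASK;   /* kernel may now write it */
                        pte_access &= ~ACC_USER_MASK;   /* ...but user access is dropped */
                }
                return pte_access;
        }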
Also, after that, no mmu specified code exists in the common function and drop two parameters in set_spte Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 01d7c2ad..2a3c890 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2342,8 +2342,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, } static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned pte_access, int user_fault, - int write_fault, int level, + unsigned pte_access, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool can_unsync, bool host_writable) { @@ -2378,9 +2377,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= (u64)pfn << PAGE_SHIFT; - if ((pte_access & ACC_WRITE_MASK) - || (!vcpu->arch.mmu.direct_map && write_fault - && !is_write_protection(vcpu) && !user_fault)) { + if (pte_access & ACC_WRITE_MASK) { /* * There are two cases: @@ -2399,19 +2396,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; - if (!vcpu->arch.mmu.direct_map - && !(pte_access & ACC_WRITE_MASK)) { - spte &= ~PT_USER_MASK; - /* - * If we converted a user page to a kernel page, - * so that the kernel can write to it when cr0.wp=0, - * then we should prevent the kernel from executing it - * if SMEP is enabled. - */ - if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) - spte |= PT64_NX_MASK; - } - /* * Optimization: for pte sync, if spte was writable the hash * lookup is unnecessary (and expensive). Write protection @@ -2442,18 +2426,15 @@ done: static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pt_access, unsigned pte_access, - int user_fault, int write_fault, - int *emulate, int level, gfn_t gfn, - pfn_t pfn, bool speculative, - bool host_writable) + int write_fault, int *emulate, int level, gfn_t gfn, + pfn_t pfn, bool speculative, bool host_writable) { int was_rmapped = 0; int rmap_count; - pgprintk("%s: spte %llx access %x write_fault %d" - " user_fault %d gfn %llx\n", + pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n", __func__, *sptep, pt_access, - write_fault, user_fault, gfn); + write_fault, gfn); if (is_rmap_spte(*sptep)) { /* @@ -2477,9 +2458,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, was_rmapped = 1; } - if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, - level, gfn, pfn, speculative, true, - host_writable)) { + if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, + true, host_writable)) { if (write_fault) *emulate = 1; kvm_mmu_flush_tlb(vcpu); @@ -2571,10 +2551,9 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, return -1; for (i = 0; i < ret; i++, gfn++, start++) - mmu_set_spte(vcpu, start, ACC_ALL, - access, 0, 0, NULL, - sp->role.level, gfn, - page_to_pfn(pages[i]), true, true); + mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL, + sp->role.level, gfn, page_to_pfn(pages[i]), + true, true); return 0; } @@ -2636,8 +2615,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, unsigned pte_access = ACC_ALL; mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, - 0, write, &emulate, - level, gfn, pfn, prefault, map_writable); + write, &emulate, level, gfn, pfn, + prefault, map_writable); direct_pte_prefetch(vcpu, iterator.sptep); ++vcpu->stat.pf_fixed; break; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 2ad76b9..0d2e711 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -326,7 +326,7 @@ 
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * we call mmu_set_spte() with host_writable = true because * pte_prefetch_gfn_to_pfn always gets a writable pfn. */ - mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, + mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true); return true; @@ -401,7 +401,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, */ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct guest_walker *gw, - int user_fault, int write_fault, int hlevel, + int write_fault, int hlevel, pfn_t pfn, bool map_writable, bool prefault) { struct kvm_mmu_page *sp = NULL; @@ -474,7 +474,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, clear_sp_write_flooding_count(it.sptep); mmu_set_spte(vcpu, it.sptep, access, gw->pte_access, - user_fault, write_fault, &emulate, it.level, + write_fault, &emulate, it.level, gw->gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); @@ -560,6 +560,26 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, walker.gfn, pfn, walker.pte_access, &r)) return r; + /* + * Do not change pte_access if the pfn is a mmio page, otherwise + * we will cache the incorrect access into mmio spte. + */ + if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) && + !is_write_protection(vcpu) && !user_fault && + !is_noslot_pfn(pfn)) { + walker.pte_access |= ACC_WRITE_MASK; + walker.pte_access &= ~ACC_USER_MASK; + + /* + * If we converted a user page to a kernel page, + * so that the kernel can write to it when cr0.wp=0, + * then we should prevent the kernel from executing it + * if SMEP is enabled. + */ + if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) + walker.pte_access &= ~ACC_EXEC_MASK; + } + spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; @@ -568,7 +588,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, kvm_mmu_free_some_pages(vcpu); if (!force_pt_level) transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); - r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, + r = FNAME(fetch)(vcpu, addr, &walker, write_fault, level, pfn, map_writable, prefault); ++vcpu->stat.pf_fixed; kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); @@ -743,7 +763,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE; - set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, + set_spte(vcpu, &sp->spt[i], pte_access, PT_PAGE_TABLE_LEVEL, gfn, spte_to_pfn(sp->spt[i]), true, false, host_writable); -- cgit v0.10.2 From 7751babd3c6d365316e7a405f516bdd0bc7cec60 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 8 Jan 2013 14:36:51 +0800 Subject: KVM: MMU: fix infinite fault access retry We have two issues in current code: - if target gfn is used as its page table, guest will refault then kvm will use small page size to map it. We need two #PF to fix its shadow page table - sometimes, say a exception is triggered during vm-exit caused by #PF (see handle_exception() in vmx.c), we remove all the shadow pages shadowed by the target gfn before go into page fault path, it will cause infinite loop: delete shadow pages shadowed by the gfn -> try to use large page size to map the gfn -> retry the access ->... 
To fix these, we can adjust page size early if the target gfn is used as page table Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2a3c890..54fc61e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2380,15 +2380,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (pte_access & ACC_WRITE_MASK) { /* - * There are two cases: - * - the one is other vcpu creates new sp in the window - * between mapping_level() and acquiring mmu-lock. - * - the another case is the new sp is created by itself - * (page-fault path) when guest uses the target gfn as - * its page table. - * Both of these cases can be fixed by allowing guest to - * retry the access, it will refault, then we can establish - * the mapping by using small page. + * Other vcpu creates new sp in the window between + * mapping_level() and acquiring mmu-lock. We can + * allow guest to retry the access, the mapping can + * be fixed if guest refault. */ if (level > PT_PAGE_TABLE_LEVEL && has_wrprotected_page(vcpu->kvm, gfn, level)) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 0d2e711..3d1a352 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -487,6 +487,38 @@ out_gpte_changed: return 0; } + /* + * To see whether the mapped gfn can write its page table in the current + * mapping. + * + * It is the helper function of FNAME(page_fault). When guest uses large page + * size to map the writable gfn which is used as current page table, we should + * force kvm to use small page size to map it because new shadow page will be + * created when kvm establishes shadow page table that stop kvm using large + * page size. Do it early can avoid unnecessary #PF and emulation. + * + * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok + * since the PDPT is always shadowed, that means, we can not use large page + * size to map the gfn which is used as PDPT. + */ +static bool +FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, + struct guest_walker *walker, int user_fault) +{ + int level; + gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); + + if (!(walker->pte_access & ACC_WRITE_MASK || + (!is_write_protection(vcpu) && !user_fault))) + return false; + + for (level = walker->level; level <= walker->max_level; level++) + if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask)) + return true; + + return false; +} + /* * Page fault handler. There are several causes for a page fault: * - there is no shadow pte for the guest pte @@ -541,7 +573,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, } if (walker.level >= PT_DIRECTORY_LEVEL) - force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn); + force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn) + || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault); else force_pt_level = 1; if (!force_pt_level) { -- cgit v0.10.2 From f79ed82da494bc2ea677c6fc28b5439eacf4f5cc Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Tue, 8 Jan 2013 13:00:01 +0100 Subject: KVM: trace: Fix exit decoding. trace_kvm_userspace_exit has been missing the KVM_EXIT_WATCHDOG exit. 
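The list matters because it is what the tracepoint uses to turn the numeric exit reason into a name; any exit code missing from it shows up in the trace as a raw number. Roughly (a sketch of the mechanism in include/trace/events/kvm.h, not the verbatim header):

        #define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

        /* kvm_trace_exit_reason expands to a list of ERSN() pairs, consumed by: */
        TP_printk("reason %s (%d)",
                  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
                  __entry->errno)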
CC: Bharat Bhushan Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index a23f47c..19911dd 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -14,7 +14,7 @@ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \ - ERSN(S390_UCONTROL), ERSN(S390_TSCH) + ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH) TRACE_EVENT(kvm_userspace_exit, TP_PROTO(__u32 reason, int errno), -- cgit v0.10.2 From c972f3b125d8818748429b94cd2e59f473943a33 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:43:28 +0900 Subject: KVM: Write protect the updated slot only when dirty logging is enabled Calling kvm_mmu_slot_remove_write_access() for a deleted slot does nothing but search for non-existent mmu pages which have mappings to that deleted memory; this is safe but a waste of time. Since we want to make the function rmap based in a later patch, in a manner which makes it unsafe to be called for a deleted slot, we makes the caller see if the slot is non-zero and being dirty logged. Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1c9c834..add5e48 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6897,7 +6897,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, spin_lock(&kvm->mmu_lock); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); - kvm_mmu_slot_remove_write_access(kvm, mem->slot); + /* + * Write protect all pages for dirty logging. + * Existing largepage mappings are destroyed here and new ones will + * not be created until the end of the logging. + */ + if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) + kvm_mmu_slot_remove_write_access(kvm, mem->slot); spin_unlock(&kvm->mmu_lock); /* * If memory slot is created, or moved, we need to clear all diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e45c20c..f689a6d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -817,7 +817,6 @@ int __kvm_set_memory_region(struct kvm *kvm, if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { if (kvm_create_dirty_bitmap(&new) < 0) goto out_free; - /* destroy any largepage mappings for dirty tracking */ } if (!npages || base_gfn != old.base_gfn) { -- cgit v0.10.2 From 245c3912eae642a4b7a3ce0adfcde5cc7672d5fe Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:44:09 +0900 Subject: KVM: MMU: Remove unused parameter level from __rmap_write_protect() No longer need to care about the mapping level in this function. 
Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 54fc61e..2a1cde5 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1142,7 +1142,7 @@ spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect) } static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, - int level, bool pt_protect) + bool pt_protect) { u64 *sptep; struct rmap_iterator iter; @@ -1180,7 +1180,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, while (mask) { rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), PT_PAGE_TABLE_LEVEL, slot); - __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL, false); + __rmap_write_protect(kvm, rmapp, false); /* clear the first set bit */ mask &= mask - 1; @@ -1199,7 +1199,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn) for (i = PT_PAGE_TABLE_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { rmapp = __gfn_to_rmap(gfn, i, slot); - write_protected |= __rmap_write_protect(kvm, rmapp, i, true); + write_protected |= __rmap_write_protect(kvm, rmapp, true); } return write_protected; -- cgit v0.10.2 From b99db1d35295cb26b61a1c665f542504110b0ac3 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:44:48 +0900 Subject: KVM: MMU: Make kvm_mmu_slot_remove_write_access() rmap based This makes it possible to release mmu_lock and reschedule conditionally in a later patch. Although this may increase the time needed to protect the whole slot when we start dirty logging, the kernel should not allow the userspace to trigger something that will hold a spinlock for such a long time as tens of milliseconds: actually there is no limit since it is roughly proportional to the number of guest pages. Another point to note is that this patch removes the only user of slot_bitmap which will cause some problems when we increase the number of slots further. Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2a1cde5..aeb7666 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4172,25 +4172,27 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu) void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) { - struct kvm_mmu_page *sp; - bool flush = false; + struct kvm_memory_slot *memslot; + gfn_t last_gfn; + int i; - list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { - int i; - u64 *pt; + memslot = id_to_memslot(kvm->memslots, slot); + last_gfn = memslot->base_gfn + memslot->npages - 1; - if (!test_bit(slot, sp->slot_bitmap)) - continue; + for (i = PT_PAGE_TABLE_LEVEL; + i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { + unsigned long *rmapp; + unsigned long last_index, index; - pt = sp->spt; - for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { - if (!is_shadow_present_pte(pt[i]) || - !is_last_spte(pt[i], sp->role.level)) - continue; + rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; + last_index = gfn_to_index(last_gfn, memslot->base_gfn, i); - spte_write_protect(kvm, &pt[i], &flush, false); + for (index = 0; index <= last_index; ++index, ++rmapp) { + if (*rmapp) + __rmap_write_protect(kvm, rmapp, false); } } + kvm_flush_remote_tlbs(kvm); } -- cgit v0.10.2 From e12091ce7bdd3c82fa392a868d1bdccecee655d5 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:45:28 +0900 Subject: KVM: Remove unused slot_bitmap from kvm_mmu_page Not needed any more. 
Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index fa5f1db..43fcb76 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt @@ -187,13 +187,6 @@ Shadow pages contain the following information: perform a reverse map from a pte to a gfn. When role.direct is set, any element of this array can be calculated from the gfn field when used, in this case, the array of gfns is not allocated. See role.direct and gfn. - slot_bitmap: - A bitmap containing one bit per memory slot. If the page contains a pte - mapping a page from memory slot n, then bit n of slot_bitmap will be set - (if a page is aliased among several slots, then it is not guaranteed that - all slots will be marked). - Used during dirty logging to avoid scanning a shadow page if none if its - pages need tracking. root_count: A counter keeping track of how many hardware registers (guest cr3 or pdptrs) are now pointing at the page. While this counter is nonzero, the diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c431b33..f75e1fe 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -219,11 +219,6 @@ struct kvm_mmu_page { u64 *spt; /* hold the gfn of each spte inside spt */ gfn_t *gfns; - /* - * One bit set per slot which has memory - * in this shadow page. - */ - DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM); bool unsync; int root_count; /* Currently serving as active root */ unsigned int unsync_children; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index aeb7666..9c1b2d6 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1522,7 +1522,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); - bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM); sp->parent_ptes = 0; mmu_page_add_parent_pte(vcpu, sp, parent_pte); kvm_mod_used_mmu_pages(vcpu->kvm, +1); @@ -2183,14 +2182,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); -static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) -{ - int slot = memslot_id(kvm, gfn); - struct kvm_mmu_page *sp = page_header(__pa(pte)); - - __set_bit(slot, sp->slot_bitmap); -} - /* * The function is based on mtrr_type_lookup() in * arch/x86/kernel/cpu/mtrr/generic.c @@ -2472,7 +2463,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ++vcpu->kvm->stat.lpages; if (is_shadow_present_pte(*sptep)) { - page_header_update_slot(vcpu->kvm, sptep, gfn); if (!was_rmapped) { rmap_count = rmap_add(vcpu, sptep, gfn); if (rmap_count > RMAP_RECYCLE_THRESHOLD) -- cgit v0.10.2 From b34cb590fb099f7929dd78d9464b70319ee12a98 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:46:07 +0900 Subject: KVM: Make kvm_mmu_change_mmu_pages() take mmu_lock by itself No reason to make callers take mmu_lock since we do not need to protect kvm_mmu_change_mmu_pages() and kvm_mmu_slot_remove_write_access() together by mmu_lock in kvm_arch_commit_memory_region(): the former calls kvm_mmu_commit_zap_page() and flushes TLBs by itself. Note: we do not need to protect kvm->arch.n_requested_mmu_pages by mmu_lock as can be seen from the fact that it is read locklessly. 
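The resulting split of responsibilities at the call site, as a sketch (simplified from the x86 commit-region hook in the hunk below): the page-count adjustment now locks internally, while the write-protection pass still gets an explicit lock section of its own for the time being:

        if (nr_mmu_pages)
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);    /* takes mmu_lock itself */

        if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
                spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, mem->slot);
                spin_unlock(&kvm->mmu_lock);
        }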
Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 9c1b2d6..f557280 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2143,6 +2143,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) * change the value */ + spin_lock(&kvm->mmu_lock); + if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages && !list_empty(&kvm->arch.active_mmu_pages)) { @@ -2157,6 +2159,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) } kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; + + spin_unlock(&kvm->mmu_lock); } int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index add5e48..080bbdc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3270,12 +3270,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, return -EINVAL; mutex_lock(&kvm->slots_lock); - spin_lock(&kvm->mmu_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; - spin_unlock(&kvm->mmu_lock); mutex_unlock(&kvm->slots_lock); return 0; } @@ -6894,7 +6892,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); - spin_lock(&kvm->mmu_lock); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); /* @@ -6902,9 +6899,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * Existing largepage mappings are destroyed here and new ones will * not be created until the end of the logging. */ - if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) + if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) { + spin_lock(&kvm->mmu_lock); kvm_mmu_slot_remove_write_access(kvm, mem->slot); - spin_unlock(&kvm->mmu_lock); + spin_unlock(&kvm->mmu_lock); + } /* * If memory slot is created, or moved, we need to clear all * mmio sptes. -- cgit v0.10.2 From 9d1beefb71146bbf5f820ab17c450808b0d0b2df Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:46:48 +0900 Subject: KVM: Make kvm_mmu_slot_remove_write_access() take mmu_lock by itself Better to place mmu_lock handling and TLB flushing code together since this is a self-contained function. Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f557280..e5dcae3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4173,6 +4173,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) memslot = id_to_memslot(kvm->memslots, slot); last_gfn = memslot->base_gfn + memslot->npages - 1; + spin_lock(&kvm->mmu_lock); + for (i = PT_PAGE_TABLE_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { unsigned long *rmapp; @@ -4188,6 +4190,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) } kvm_flush_remote_tlbs(kvm); + spin_unlock(&kvm->mmu_lock); } void kvm_mmu_zap_all(struct kvm *kvm) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 080bbdc..5483228 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6899,11 +6899,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * Existing largepage mappings are destroyed here and new ones will * not be created until the end of the logging. 
*/ - if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) { - spin_lock(&kvm->mmu_lock); + if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) kvm_mmu_slot_remove_write_access(kvm, mem->slot); - spin_unlock(&kvm->mmu_lock); - } /* * If memory slot is created, or moved, we need to clear all * mmio sptes. -- cgit v0.10.2 From 6b81b05e449e15abb60eaa4f62cdc7954f4d74f0 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 8 Jan 2013 19:47:33 +0900 Subject: KVM: MMU: Conditionally reschedule when kvm_mmu_slot_remove_write_access() takes a long time If the userspace starts dirty logging for a large slot, say 64GB of memory, kvm_mmu_slot_remove_write_access() needs to hold mmu_lock for a long time such as tens of milliseconds. This patch controls the lock hold time by asking the scheduler if we need to reschedule for others. One penalty for this is that we need to flush TLBs before releasing mmu_lock. But since holding mmu_lock for a long time does affect not only the guest, vCPU threads in other words, but also the host as a whole, we should pay for that. In practice, the cost will not be so high because we can protect a fair amount of memory before being rescheduled: on my test environment, cond_resched_lock() was called only once for protecting 12GB of memory even without THP. We can also revisit Avi's "unlocked TLB flush" work later for completely suppressing extra TLB flushes if needed. Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e5dcae3..9f628f7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4186,6 +4186,11 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) for (index = 0; index <= last_index; ++index, ++rmapp) { if (*rmapp) __rmap_write_protect(kvm, rmapp, false); + + if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + kvm_flush_remote_tlbs(kvm); + cond_resched_lock(&kvm->mmu_lock); + } } } -- cgit v0.10.2 From a046b816a4587c7898772af7cc7e6798cc8b56dd Mon Sep 17 00:00:00 2001 From: Cong Ding Date: Tue, 15 Jan 2013 11:17:29 +0100 Subject: KVM: s390: kvm/sigp.c: fix memory leakage the variable inti should be freed in the branch CPUSTAT_STOPPED. Signed-off-by: Cong Ding Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 461e841..1c48ab2 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -137,8 +137,10 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) inti->type = KVM_S390_SIGP_STOP; spin_lock_bh(&li->lock); - if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) + if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { + kfree(inti); goto out; + } list_add_tail(&inti->list, &li->list); atomic_set(&li->active, 1); atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); -- cgit v0.10.2 From 0ea75e1d26dd4ccbbfe4d1a5931903b15b82ba2a Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Fri, 11 Jan 2013 18:26:10 +0900 Subject: KVM: set_memory_region: Don't jump to out_free unnecessarily This makes the separation between the sanity checks and the rest of the code a bit clearer. 
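The convention, as a standalone userspace sketch (names invented, not the KVM code): checks that cannot have allocated anything jump to a plain out label, while only paths that really own a temporary allocation go through out_free.

#include <stdio.h>
#include <stdlib.h>

static int set_region(size_t size)
{
	char *tmp = NULL;
	int r = -1;

	if (size == 0)
		goto out;		/* nothing allocated yet: plain "out" */

	tmp = malloc(size);
	if (!tmp)
		goto out;		/* allocation failed, still nothing to free */

	if (size > (1u << 20))
		goto out_free;		/* late check: must release tmp */

	r = 0;				/* success: tmp was only temporary here */
out_free:
	free(tmp);
out:
	return r;
}

int main(void)
{
	printf("%d %d %d\n", set_region(0), set_region(4096), set_region(1u << 21));
	return 0;
}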
Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f689a6d..28eb160 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -778,9 +778,9 @@ int __kvm_set_memory_region(struct kvm *kvm, */ r = -EINVAL; if (npages && old.npages && npages != old.npages) - goto out_free; + goto out; if (!npages && !old.npages) - goto out_free; + goto out; /* Check for overlaps */ r = -EEXIST; @@ -789,7 +789,7 @@ int __kvm_set_memory_region(struct kvm *kvm, continue; if (!((base_gfn + npages <= slot->base_gfn) || (base_gfn >= slot->base_gfn + slot->npages))) - goto out_free; + goto out; } /* Free page dirty bitmap if unneeded */ @@ -890,7 +890,6 @@ out_free: kvm_free_physmem_slot(&new, &old); out: return r; - } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); -- cgit v0.10.2 From 0a706beefb1de71bf8551f9860dc529ef14d6753 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Fri, 11 Jan 2013 18:26:55 +0900 Subject: KVM: set_memory_region: Don't check for overlaps unless we create or move a slot Don't need the check for deleting an existing slot or just modifiying the flags. Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 28eb160..fca6487 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -782,14 +782,16 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!npages && !old.npages) goto out; - /* Check for overlaps */ - r = -EEXIST; - kvm_for_each_memslot(slot, kvm->memslots) { - if (slot->id >= KVM_USER_MEM_SLOTS || slot == memslot) - continue; - if (!((base_gfn + npages <= slot->base_gfn) || - (base_gfn >= slot->base_gfn + slot->npages))) - goto out; + if ((npages && !old.npages) || (base_gfn != old.base_gfn)) { + /* Check for overlaps */ + r = -EEXIST; + kvm_for_each_memslot(slot, kvm->memslots) { + if (slot->id >= KVM_USER_MEM_SLOTS || slot == memslot) + continue; + if (!((base_gfn + npages <= slot->base_gfn) || + (base_gfn >= slot->base_gfn + slot->npages))) + goto out; + } } /* Free page dirty bitmap if unneeded */ -- cgit v0.10.2 From a843fac2536d6d81335d7011dd7ec4e438161dd7 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Fri, 11 Jan 2013 18:27:43 +0900 Subject: KVM: set_memory_region: Remove unnecessary variable memslot One such variable, slot, is enough for holding a pointer temporarily. We also remove another local variable named slot, which is limited in a block, since it is confusing to have the same name in this function. 
Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index fca6487..5e709eb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -728,7 +728,7 @@ int __kvm_set_memory_region(struct kvm *kvm, int r; gfn_t base_gfn; unsigned long npages; - struct kvm_memory_slot *memslot, *slot; + struct kvm_memory_slot *slot; struct kvm_memory_slot old, new; struct kvm_memslots *slots = NULL, *old_memslots; @@ -754,7 +754,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) goto out; - memslot = id_to_memslot(kvm->memslots, mem->slot); + slot = id_to_memslot(kvm->memslots, mem->slot); base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT; @@ -765,7 +765,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!npages) mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; - new = old = *memslot; + new = old = *slot; new.id = mem->slot; new.base_gfn = base_gfn; @@ -786,7 +786,8 @@ int __kvm_set_memory_region(struct kvm *kvm, /* Check for overlaps */ r = -EEXIST; kvm_for_each_memslot(slot, kvm->memslots) { - if (slot->id >= KVM_USER_MEM_SLOTS || slot == memslot) + if ((slot->id >= KVM_USER_MEM_SLOTS) || + (slot->id == mem->slot)) continue; if (!((base_gfn + npages <= slot->base_gfn) || (base_gfn >= slot->base_gfn + slot->npages))) @@ -822,8 +823,6 @@ int __kvm_set_memory_region(struct kvm *kvm, } if (!npages || base_gfn != old.base_gfn) { - struct kvm_memory_slot *slot; - r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); -- cgit v0.10.2 From 22368028febf81f5c6d1e9fdd737d50543219b00 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Sun, 13 Jan 2013 23:44:12 +0800 Subject: KVM: x86: clean up reexecute_instruction Little cleanup for reexecute_instruction, also use gpa_to_gfn in retry_instruction Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5483228..f0288c5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4759,19 +4759,18 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) if (tdp_enabled) return false; + gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL); + if (gpa == UNMAPPED_GVA) + return true; /* let cpu generate fault */ + /* * if emulation was due to access to shadowed page table * and it failed try to unshadow page and re-enter the * guest to let CPU execute the instruction. */ - if (kvm_mmu_unprotect_page_virt(vcpu, gva)) + if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa))) return true; - gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL); - - if (gpa == UNMAPPED_GVA) - return true; /* let cpu generate fault */ - /* * Do not retry the unhandleable instruction if it faults on the * readonly host memory, otherwise it will goto a infinite loop: @@ -4826,7 +4825,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, if (!vcpu->arch.mmu.direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); - kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); + kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } -- cgit v0.10.2 From 95b3cf69bdf8b27a02d878e24ca353cebb4e009e Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Sun, 13 Jan 2013 23:46:52 +0800 Subject: KVM: x86: let reexecute_instruction work for tdp Currently, reexecute_instruction refused to retry all instructions if tdp is enabled. 
If nested npt is used, the emulation may be caused by shadow page, it can be fixed by dropping the shadow page. And the only condition that tdp can not retry the instruction is the access fault on error pfn Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f0288c5..6f9cab0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4751,25 +4751,25 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) return r; } -static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) +static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2) { - gpa_t gpa; + gpa_t gpa = cr2; pfn_t pfn; - if (tdp_enabled) - return false; - - gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL); - if (gpa == UNMAPPED_GVA) - return true; /* let cpu generate fault */ + if (!vcpu->arch.mmu.direct_map) { + /* + * Write permission should be allowed since only + * write access need to be emulated. + */ + gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); - /* - * if emulation was due to access to shadowed page table - * and it failed try to unshadow page and re-enter the - * guest to let CPU execute the instruction. - */ - if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa))) - return true; + /* + * If the mapping is invalid in guest, let cpu retry + * it to generate fault. + */ + if (gpa == UNMAPPED_GVA) + return true; + } /* * Do not retry the unhandleable instruction if it faults on the @@ -4778,12 +4778,37 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) * instruction -> ... */ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); - if (!is_error_noslot_pfn(pfn)) { - kvm_release_pfn_clean(pfn); + + /* + * If the instruction failed on the error pfn, it can not be fixed, + * report the error to userspace. + */ + if (is_error_noslot_pfn(pfn)) + return false; + + kvm_release_pfn_clean(pfn); + + /* The instructions are well-emulated on direct mmu. */ + if (vcpu->arch.mmu.direct_map) { + unsigned int indirect_shadow_pages; + + spin_lock(&vcpu->kvm->mmu_lock); + indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; + spin_unlock(&vcpu->kvm->mmu_lock); + + if (indirect_shadow_pages) + kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); + return true; } - return false; + /* + * if emulation was due to access to shadowed page table + * and it failed try to unshadow page and re-enter the + * guest to let CPU execute the instruction. + */ + kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); + return true; } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, -- cgit v0.10.2 From 93c05d3ef25275829d421a255271595ac219a518 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Sun, 13 Jan 2013 23:49:07 +0800 Subject: KVM: x86: improve reexecute_instruction The current reexecute_instruction can not well detect the failed instruction emulation. It allows guest to retry all the instructions except it accesses on error pfn For example, some cases are nested-write-protect - if the page we want to write is used as PDE but it chains to itself. 
Under this case, we should stop the emulation and report the case to userspace Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f75e1fe..77d56a4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -497,6 +497,13 @@ struct kvm_vcpu_arch { u64 msr_val; struct gfn_to_hva_cache data; } pv_eoi; + + /* + * Indicate whether the access faults on its page table in guest + * which is set when fix page fault and used to detect unhandeable + * instruction. + */ + bool write_fault_to_shadow_pgtable; }; struct kvm_lpage_info { diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 3d1a352..ca69dcc 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -497,26 +497,34 @@ out_gpte_changed: * created when kvm establishes shadow page table that stop kvm using large * page size. Do it early can avoid unnecessary #PF and emulation. * + * @write_fault_to_shadow_pgtable will return true if the fault gfn is + * currently used as its page table. + * * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok * since the PDPT is always shadowed, that means, we can not use large page * size to map the gfn which is used as PDPT. */ static bool FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, - struct guest_walker *walker, int user_fault) + struct guest_walker *walker, int user_fault, + bool *write_fault_to_shadow_pgtable) { int level; gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); + bool self_changed = false; if (!(walker->pte_access & ACC_WRITE_MASK || (!is_write_protection(vcpu) && !user_fault))) return false; - for (level = walker->level; level <= walker->max_level; level++) - if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask)) - return true; + for (level = walker->level; level <= walker->max_level; level++) { + gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; + + self_changed |= !(gfn & mask); + *write_fault_to_shadow_pgtable |= !gfn; + } - return false; + return self_changed; } /* @@ -544,7 +552,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, int level = PT_PAGE_TABLE_LEVEL; int force_pt_level; unsigned long mmu_seq; - bool map_writable; + bool map_writable, is_self_change_mapping; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); @@ -572,9 +580,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, return 0; } + vcpu->arch.write_fault_to_shadow_pgtable = false; + + is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, + &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); + if (walker.level >= PT_DIRECTORY_LEVEL) force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn) - || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault); + || is_self_change_mapping; else force_pt_level = 1; if (!force_pt_level) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6f9cab0..e00dd05 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4751,7 +4751,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) return r; } -static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2) +static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, + bool write_fault_to_shadow_pgtable) { gpa_t gpa = cr2; pfn_t pfn; @@ -4808,7 +4809,13 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2) * guest to let CPU execute the instruction. 
*/ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); - return true; + + /* + * If the access faults on its page table, it can not + * be fixed by unprotecting shadow page and it should + * be reported to userspace. + */ + return !write_fault_to_shadow_pgtable; } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, @@ -4867,7 +4874,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; + bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; + /* + * Clear write_fault_to_shadow_pgtable here to ensure it is + * never reused. + */ + vcpu->arch.write_fault_to_shadow_pgtable = false; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { @@ -4886,7 +4899,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (r != EMULATION_OK) { if (emulation_type & EMULTYPE_TRAP_UD) return EMULATE_FAIL; - if (reexecute_instruction(vcpu, cr2)) + if (reexecute_instruction(vcpu, cr2, + write_fault_to_spt)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; @@ -4916,7 +4930,7 @@ restart: return EMULATE_DONE; if (r == EMULATION_FAILED) { - if (reexecute_instruction(vcpu, cr2)) + if (reexecute_instruction(vcpu, cr2, write_fault_to_spt)) return EMULATE_DONE; return handle_emulation_failure(vcpu); -- cgit v0.10.2 From 0bdea06892e33afddbdc5da6df305e9fe9c41365 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:50 +0200 Subject: KVM: x86 emulator: Convert SHLD, SHRD to fastop Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 619a33d..a21773f 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -454,6 +454,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) #define FOP_END \ ".popsection") +#define FOPNOP() FOP_ALIGN FOP_RET + #define FOP1E(op, dst) \ FOP_ALIGN #op " %" #dst " \n\t" FOP_RET @@ -476,6 +478,18 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) ON64(FOP2E(op##q, rax, rbx)) \ FOP_END +#define FOP3E(op, dst, src, src2) \ + FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET + +/* 3-operand, word-only, src2=cl */ +#define FASTOP3WCL(op) \ + FOP_START(op) \ + FOPNOP() \ + FOP3E(op##w, ax, bx, cl) \ + FOP3E(op##l, eax, ebx, cl) \ + ON64(FOP3E(op##q, rax, rbx, cl)) \ + FOP_END + #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ do { \ unsigned long _tmp; \ @@ -3036,6 +3050,9 @@ FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); +FASTOP3WCL(shld); +FASTOP3WCL(shrd); + static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. 
*/ @@ -4015,14 +4032,14 @@ static const struct opcode twobyte_table[256] = { /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt), - D(DstMem | SrcReg | Src2ImmByte | ModRM), - D(DstMem | SrcReg | Src2CL | ModRM), N, N, + F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), + F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), DI(ImplicitOps, rsm), I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), - D(DstMem | SrcReg | Src2ImmByte | ModRM), - D(DstMem | SrcReg | Src2CL | ModRM), + F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), + F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), D(ModRM), I(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), @@ -4834,14 +4851,6 @@ twobyte_insn: case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; - case 0xa4: /* shld imm8, r, r/m */ - case 0xa5: /* shld cl, r, r/m */ - emulate_2op_cl(ctxt, "shld"); - break; - case 0xac: /* shrd imm8, r, r/m */ - case 0xad: /* shrd cl, r, r/m */ - emulate_2op_cl(ctxt, "shrd"); - break; case 0xae: /* clflush */ break; case 0xb6 ... 0xb7: /* movzx */ -- cgit v0.10.2 From 007a3b547512d69f67ceb9641796d64552bd337e Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:51 +0200 Subject: KVM: x86 emulator: convert shift/rotate instructions to fastop SHL, SHR, ROL, ROR, RCL, RCR, SAR, SAL Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a21773f..a94b1d7 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -478,6 +478,15 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) ON64(FOP2E(op##q, rax, rbx)) \ FOP_END +/* 2 operand, src is CL */ +#define FASTOP2CL(op) \ + FOP_START(op) \ + FOP2E(op##b, al, cl) \ + FOP2E(op##w, ax, cl) \ + FOP2E(op##l, eax, cl) \ + ON64(FOP2E(op##q, rax, cl)) \ + FOP_END + #define FOP3E(op, dst, src, src2) \ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET @@ -2046,38 +2055,17 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_grp2(struct x86_emulate_ctxt *ctxt) -{ - switch (ctxt->modrm_reg) { - case 0: /* rol */ - emulate_2op_SrcB(ctxt, "rol"); - break; - case 1: /* ror */ - emulate_2op_SrcB(ctxt, "ror"); - break; - case 2: /* rcl */ - emulate_2op_SrcB(ctxt, "rcl"); - break; - case 3: /* rcr */ - emulate_2op_SrcB(ctxt, "rcr"); - break; - case 4: /* sal/shl */ - case 6: /* sal/shl */ - emulate_2op_SrcB(ctxt, "sal"); - break; - case 5: /* shr */ - emulate_2op_SrcB(ctxt, "shr"); - break; - case 7: /* sar */ - emulate_2op_SrcB(ctxt, "sar"); - break; - } - return X86EMUL_CONTINUE; -} - FASTOP1(not); FASTOP1(neg); +FASTOP2CL(rol); +FASTOP2CL(ror); +FASTOP2CL(rcl); +FASTOP2CL(rcr); +FASTOP2CL(shl); +FASTOP2CL(shr); +FASTOP2CL(sar); + static int em_mul_ex(struct x86_emulate_ctxt *ctxt) { u8 ex = 0; @@ -3726,6 +3714,17 @@ static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, }; +static const struct opcode group2[] = { + F(DstMem | ModRM, em_rol), + F(DstMem | ModRM, em_ror), + F(DstMem | ModRM, em_rcl), + F(DstMem | ModRM, em_rcr), + F(DstMem | ModRM, em_shl), + F(DstMem | ModRM, em_shr), + F(DstMem | ModRM, em_shl), + F(DstMem | ModRM, em_sar), +}; + static const 
struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), @@ -3949,7 +3948,7 @@ static const struct opcode opcode_table[256] = { /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ - D2bv(DstMem | SrcImmByte | ModRM), + G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), I(ImplicitOps | Stack, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), @@ -3961,7 +3960,8 @@ static const struct opcode opcode_table[256] = { D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ - D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), + G(Src2One | ByteOp, group2), G(Src2One, group2), + G(Src2CL | ByteOp, group2), G(Src2CL, group2), N, I(DstAcc | SrcImmByte | No64, em_aad), N, N, /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, @@ -4713,9 +4713,6 @@ special_insn: case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; - case 0xc0 ... 0xc1: - rc = em_grp2(ctxt); - break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; @@ -4726,13 +4723,6 @@ special_insn: if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; - case 0xd0 ... 0xd1: /* Grp2 */ - rc = em_grp2(ctxt); - break; - case 0xd2 ... 0xd3: /* Grp2 */ - ctxt->src.val = reg_read(ctxt, VCPU_REGS_RCX); - rc = em_grp2(ctxt); - break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ jmp_rel(ctxt, ctxt->src.val); -- cgit v0.10.2 From 9ae9febae9500a0a6f5ce29ee4b8d942b5332529 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:52 +0200 Subject: KVM: x86 emulator: covert SETCC to fastop This is a bit of a special case since we don't have the usual byte/word/long/quad switch; instead we switch on the condition code embedded in the instruction. 
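The dispatch itself can be pictured with a plain userspace sketch (illustrative handlers, not the emulator code): one tiny predicate per condition code, chosen by the low bits of the opcode. The fastop version does the same selection by packing 4-byte-aligned SETcc asm stubs back to back and calling em_setcc + 4 * (condition & 0xf), so no C-level table is needed.

#include <stdio.h>
#include <stdbool.h>

#define X86_CF 0x001ul
#define X86_ZF 0x040ul
#define X86_OF 0x800ul

typedef bool (*cc_fn)(unsigned long flags);

static bool cc_o(unsigned long f)  { return f & X86_OF; }
static bool cc_no(unsigned long f) { return !(f & X86_OF); }
static bool cc_b(unsigned long f)  { return f & X86_CF; }
static bool cc_nb(unsigned long f) { return !(f & X86_CF); }
static bool cc_z(unsigned long f)  { return f & X86_ZF; }
static bool cc_nz(unsigned long f) { return !(f & X86_ZF); }

/* Only condition codes 0x0-0x5 are modelled in this sketch. */
static const cc_fn handlers[] = { cc_o, cc_no, cc_b, cc_nb, cc_z, cc_nz };

static bool test_cc(unsigned int condition, unsigned long flags)
{
	return handlers[condition](flags);
}

int main(void)
{
	printf("setz, ZF set:   %d\n", test_cc(0x4, X86_ZF));
	printf("setz, ZF clear: %d\n", test_cc(0x4, 0));
	return 0;
}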
Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a94b1d7..e13138d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -499,6 +499,28 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) ON64(FOP3E(op##q, rax, rbx, cl)) \ FOP_END +/* Special case for SETcc - 1 instruction per cc */ +#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" + +FOP_START(setcc) +FOP_SETCC(seto) +FOP_SETCC(setno) +FOP_SETCC(setc) +FOP_SETCC(setnc) +FOP_SETCC(setz) +FOP_SETCC(setnz) +FOP_SETCC(setbe) +FOP_SETCC(setnbe) +FOP_SETCC(sets) +FOP_SETCC(setns) +FOP_SETCC(setp) +FOP_SETCC(setnp) +FOP_SETCC(setl) +FOP_SETCC(setnl) +FOP_SETCC(setle) +FOP_SETCC(setnle) +FOP_END; + #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ do { \ unsigned long _tmp; \ @@ -939,39 +961,15 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt, return rc; } -static int test_cc(unsigned int condition, unsigned int flags) +static u8 test_cc(unsigned int condition, unsigned long flags) { - int rc = 0; + u8 rc; + void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); - switch ((condition & 15) >> 1) { - case 0: /* o */ - rc |= (flags & EFLG_OF); - break; - case 1: /* b/c/nae */ - rc |= (flags & EFLG_CF); - break; - case 2: /* z/e */ - rc |= (flags & EFLG_ZF); - break; - case 3: /* be/na */ - rc |= (flags & (EFLG_CF|EFLG_ZF)); - break; - case 4: /* s */ - rc |= (flags & EFLG_SF); - break; - case 5: /* p/pe */ - rc |= (flags & EFLG_PF); - break; - case 7: /* le/ng */ - rc |= (flags & EFLG_ZF); - /* fall through */ - case 6: /* l/nge */ - rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF)); - break; - } - - /* Odd condition identifiers (lsb == 1) have inverted sense. 
*/ - return (!!rc ^ (condition & 1)); + flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; + asm("pushq %[flags]; popf; call *%[fastop]" + : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); + return rc; } static void fetch_register_operand(struct operand *op) -- cgit v0.10.2 From 95413dc41398fec2518abf4e0449503b1306dcbc Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:53 +0200 Subject: KVM: x86 emulator: convert INC/DEC to fastop Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e13138d..edb09e9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2055,6 +2055,8 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) FASTOP1(not); FASTOP1(neg); +FASTOP1(inc); +FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); @@ -2105,12 +2107,6 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt) int rc = X86EMUL_CONTINUE; switch (ctxt->modrm_reg) { - case 0: /* inc */ - emulate_1op(ctxt, "inc"); - break; - case 1: /* dec */ - emulate_1op(ctxt, "dec"); - break; case 2: /* call near abs */ { long int old_eip; old_eip = ctxt->_eip; @@ -3735,14 +3731,14 @@ static const struct opcode group3[] = { }; static const struct opcode group4[] = { - I(ByteOp | DstMem | SrcNone | Lock, em_grp45), - I(ByteOp | DstMem | SrcNone | Lock, em_grp45), + F(ByteOp | DstMem | SrcNone | Lock, em_inc), + F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { - I(DstMem | SrcNone | Lock, em_grp45), - I(DstMem | SrcNone | Lock, em_grp45), + F(DstMem | SrcNone | Lock, em_inc), + F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | Stack, em_grp45), I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), I(SrcMem | Stack, em_grp45), @@ -3891,7 +3887,7 @@ static const struct opcode opcode_table[256] = { /* 0x38 - 0x3F */ F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ - X16(D(DstReg)), + X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), /* 0x50 - 0x57 */ X8(I(SrcReg | Stack, em_push)), /* 0x58 - 0x5F */ @@ -4681,12 +4677,6 @@ special_insn: goto twobyte_insn; switch (ctxt->b) { - case 0x40 ... 0x47: /* inc r16/r32 */ - emulate_1op(ctxt, "inc"); - break; - case 0x48 ... 
0x4f: /* dec r16/r32 */ - emulate_1op(ctxt, "dec"); - break; case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; -- cgit v0.10.2 From 11c363ba8f8eb163c275920b4a27697eb43da6e9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:54 +0200 Subject: KVM: x86 emulator: convert BT/BTS/BTR/BTC/BSF/BSR to fastop Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index edb09e9..62014dc 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -478,6 +478,15 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) ON64(FOP2E(op##q, rax, rbx)) \ FOP_END +/* 2 operand, word only */ +#define FASTOP2W(op) \ + FOP_START(op) \ + FOPNOP() \ + FOP2E(op##w, ax, bx) \ + FOP2E(op##l, eax, ebx) \ + ON64(FOP2E(op##q, rax, rbx)) \ + FOP_END + /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ @@ -2066,6 +2075,13 @@ FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); +FASTOP2W(bsf); +FASTOP2W(bsr); +FASTOP2W(bt); +FASTOP2W(bts); +FASTOP2W(btr); +FASTOP2W(btc); + static int em_mul_ex(struct x86_emulate_ctxt *ctxt) { u8 ex = 0; @@ -3377,47 +3393,6 @@ static int em_sti(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_bt(struct x86_emulate_ctxt *ctxt) -{ - /* Disable writeback. */ - ctxt->dst.type = OP_NONE; - /* only subword offset */ - ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; - - emulate_2op_SrcV_nobyte(ctxt, "bt"); - return X86EMUL_CONTINUE; -} - -static int em_bts(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV_nobyte(ctxt, "bts"); - return X86EMUL_CONTINUE; -} - -static int em_btr(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV_nobyte(ctxt, "btr"); - return X86EMUL_CONTINUE; -} - -static int em_btc(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV_nobyte(ctxt, "btc"); - return X86EMUL_CONTINUE; -} - -static int em_bsf(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV_nobyte(ctxt, "bsf"); - return X86EMUL_CONTINUE; -} - -static int em_bsr(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV_nobyte(ctxt, "bsr"); - return X86EMUL_CONTINUE; -} - static int em_cpuid(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; @@ -3773,10 +3748,10 @@ static const struct group_dual group7 = { { static const struct opcode group8[] = { N, N, N, N, - I(DstMem | SrcImmByte, em_bt), - I(DstMem | SrcImmByte | Lock | PageTable, em_bts), - I(DstMem | SrcImmByte | Lock, em_btr), - I(DstMem | SrcImmByte | Lock | PageTable, em_btc), + F(DstMem | SrcImmByte | NoWrite, em_bt), + F(DstMem | SrcImmByte | Lock | PageTable, em_bts), + F(DstMem | SrcImmByte | Lock, em_btr), + F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; static const struct group_dual group9 = { { @@ -4025,28 +4000,29 @@ static const struct opcode twobyte_table[256] = { X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), - II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt), + II(ImplicitOps, em_cpuid, cpuid), + F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), DI(ImplicitOps, rsm), - I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), + F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), 
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), D(ModRM), I(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), - I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), + F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), - I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), - I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr), + F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), + F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ D2bv(DstMem | SrcReg | ModRM | Lock), -- cgit v0.10.2 From 4d7583493e1777f42cc0fda9573d312e4753aa3c Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:55 +0200 Subject: KVM: x86 emulator: convert 2-operand IMUL to fastop Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 62014dc..45ddec8 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -441,6 +441,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) } \ } while (0) +static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); + #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" #define FOP_RET "ret \n\t" @@ -3051,6 +3053,8 @@ FASTOP2(test); FASTOP3WCL(shld); FASTOP3WCL(shrd); +FASTOP2W(imul); + static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. */ @@ -3063,16 +3067,10 @@ static int em_xchg(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_imul(struct x86_emulate_ctxt *ctxt) -{ - emulate_2op_SrcV_nobyte(ctxt, "imul"); - return X86EMUL_CONTINUE; -} - static int em_imul_3op(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = ctxt->src2.val; - return em_imul(ctxt); + return fastop(ctxt, em_imul); } static int em_cwd(struct x86_emulate_ctxt *ctxt) @@ -4010,7 +4008,7 @@ static const struct opcode twobyte_table[256] = { F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), - D(ModRM), I(DstReg | SrcMem | ModRM, em_imul), + D(ModRM), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), -- cgit v0.10.2 From 34b77652b9e98b5796b3a69df600e1717572e51d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:56 +0200 Subject: KVM: x86 emulator: rearrange fastop definitions Make fastop opcodes usable in other emulations. 
Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 45ddec8..d06354d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -972,6 +972,41 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt, return rc; } +FASTOP2(add); +FASTOP2(or); +FASTOP2(adc); +FASTOP2(sbb); +FASTOP2(and); +FASTOP2(sub); +FASTOP2(xor); +FASTOP2(cmp); +FASTOP2(test); + +FASTOP3WCL(shld); +FASTOP3WCL(shrd); + +FASTOP2W(imul); + +FASTOP1(not); +FASTOP1(neg); +FASTOP1(inc); +FASTOP1(dec); + +FASTOP2CL(rol); +FASTOP2CL(ror); +FASTOP2CL(rcl); +FASTOP2CL(rcr); +FASTOP2CL(shl); +FASTOP2CL(shr); +FASTOP2CL(sar); + +FASTOP2W(bsf); +FASTOP2W(bsr); +FASTOP2W(bt); +FASTOP2W(bts); +FASTOP2W(btr); +FASTOP2W(btc); + static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; @@ -2064,26 +2099,6 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -FASTOP1(not); -FASTOP1(neg); -FASTOP1(inc); -FASTOP1(dec); - -FASTOP2CL(rol); -FASTOP2CL(ror); -FASTOP2CL(rcl); -FASTOP2CL(rcr); -FASTOP2CL(shl); -FASTOP2CL(shr); -FASTOP2CL(sar); - -FASTOP2W(bsf); -FASTOP2W(bsr); -FASTOP2W(bt); -FASTOP2W(bts); -FASTOP2W(btr); -FASTOP2W(btc); - static int em_mul_ex(struct x86_emulate_ctxt *ctxt) { u8 ex = 0; @@ -3040,21 +3055,6 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -FASTOP2(add); -FASTOP2(or); -FASTOP2(adc); -FASTOP2(sbb); -FASTOP2(and); -FASTOP2(sub); -FASTOP2(xor); -FASTOP2(cmp); -FASTOP2(test); - -FASTOP3WCL(shld); -FASTOP3WCL(shrd); - -FASTOP2W(imul); - static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. */ -- cgit v0.10.2 From 158de57f905ed97ea0f993feac1c40a40f5c7a04 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jan 2013 19:51:57 +0200 Subject: KVM: x86 emulator: convert a few freestanding emulations to fastop Reviewed-by: Gleb Natapov Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d06354d..e99fb72 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2209,7 +2209,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) /* Save real source value, then compare EAX against destination. */ ctxt->src.orig_val = ctxt->src.val; ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX); - emulate_2op_SrcV(ctxt, "cmp"); + fastop(ctxt, em_cmp); if (ctxt->eflags & EFLG_ZF) { /* Success: write back to memory. */ @@ -2977,7 +2977,7 @@ static int em_das(struct x86_emulate_ctxt *ctxt) ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; - emulate_2op_SrcV(ctxt, "or"); + fastop(ctxt, em_or); ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); if (cf) ctxt->eflags |= X86_EFLAGS_CF; @@ -4816,7 +4816,7 @@ twobyte_insn: (s16) ctxt->src.val; break; case 0xc0 ... 0xc1: /* xadd */ - emulate_2op_SrcV(ctxt, "add"); + fastop(ctxt, em_add); /* Write back the register source. */ ctxt->src.val = ctxt->dst.orig_val; write_register_operand(&ctxt->src); -- cgit v0.10.2 From 1f3141e80b149e7215313dff29e9a0c47811b1d1 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:41 +0200 Subject: KVM: VMX: remove special CPL cache access during transition to real mode. Since vmx_get_cpl() always returns 0 when VCPU is in real mode it is no longer needed. Also reset CPL cache to zero during transaction to protected mode since transaction may happen while CS.selectors & 3 != 0, but in reality CPL is 0. 
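The caching pattern involved, as a standalone sketch (invented field names, not the VMX code): a bit in regs_avail guards the cached CPL, a miss recomputes it from the CS selector, and entering protected mode can now simply mark the cache valid with CPL = 0 instead of taking the special path this patch removes.

#include <stdio.h>

#define EXREG_CPL 0

struct vcpu {
	unsigned long regs_avail;
	int cpl;
	unsigned short cs_selector;	/* stands in for the VMCS CS field */
};

static int get_cpl(struct vcpu *v)
{
	if (!(v->regs_avail & (1ul << EXREG_CPL))) {
		v->cpl = v->cs_selector & 3;	/* recompute on a cache miss */
		v->regs_avail |= 1ul << EXREG_CPL;
	}
	return v->cpl;
}

static void enter_pmode(struct vcpu *v)
{
	/* CPL is always 0 when the CPU enters protected mode */
	v->regs_avail |= 1ul << EXREG_CPL;
	v->cpl = 0;
}

int main(void)
{
	struct vcpu v = { .regs_avail = 0, .cpl = 0, .cs_selector = 0xf3 };

	printf("lazily computed CPL: %d\n", get_cpl(&v));	/* 3 */
	enter_pmode(&v);
	printf("CPL after enter_pmode: %d\n", get_cpl(&v));	/* 0 */
	return 0;
}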
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index dd2a85c..9d2ec88 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2817,6 +2817,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu) fix_pmode_dataseg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_dataseg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_dataseg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); + + /* CPL is always 0 when CPU enters protected mode */ + __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); + vmx->cpl = 0; } static gva_t rmode_tss_base(struct kvm *kvm) @@ -3229,14 +3233,6 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu) && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */ return 3; - /* - * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations - * fail; use the cache instead. - */ - if (unlikely(vmx->emulation_required && emulate_invalid_guest_state)) { - return vmx->cpl; - } - if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3; -- cgit v0.10.2 From 2f143240cb822c0d23ad591b89fe10e7c1f842f5 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:42 +0200 Subject: KVM: VMX: reset CPL only on CS register write. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9d2ec88..edfbe94 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3269,7 +3269,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; vmx_segment_cache_clear(vmx); - __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); + if (seg == VCPU_SREG_CS) + __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { vmx->rmode.segs[seg] = *var; -- cgit v0.10.2 From c5e97c80b5ddd6139bdadcbd44e263c2a3e7fae6 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:43 +0200 Subject: KVM: VMX: if unrestricted guest is enabled vcpu state is always valid. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index edfbe94..f942b20 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3488,6 +3488,9 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { + if (enable_unrestricted_guest) + return true; + /* real mode guest state checks */ if (!is_protmode(vcpu)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) -- cgit v0.10.2 From 286da4156dc65c8a054580fdd96b7709132dce8d Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:44 +0200 Subject: KVM: VMX: remove hack that disables emulation on vcpu reset/init There is no reason for it. If state is suitable for vmentry it will be detected during guest entry and no emulation will happen. 
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f942b20..20409bd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4035,9 +4035,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) ret = 0; - /* HACK: Don't enable emulation on guest boot/reset */ - vmx->emulation_required = 0; - return ret; } -- cgit v0.10.2 From 218e763f458c44f30041c1b48b4371e130fd4317 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:45 +0200 Subject: KVM: VMX: skip vmx->rmode.vm86_active check on cr0 write if unrestricted guest is enabled vmx->rmode.vm86_active is never true is unrestricted guest is enabled. Make it more explicit that neither enter_pmode() nor enter_rmode() is called in this case. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 20409bd..319e840 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2877,9 +2877,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu) unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); - if (enable_unrestricted_guest) - return; - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); @@ -3086,14 +3083,15 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (enable_unrestricted_guest) hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST) | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; - else + else { hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON; - if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) - enter_pmode(vcpu); + if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) + enter_pmode(vcpu); - if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) - enter_rmode(vcpu); + if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) + enter_rmode(vcpu); + } #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { -- cgit v0.10.2 From 25391454e73e3156202264eb3c473825afe4bc94 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:46 +0200 Subject: KVM: VMX: don't clobber segment AR of unusable segments. Usability is returned in unusable field, so not need to clobber entire AR. Callers have to know how to deal with unusable segments already since if emulate_invalid_guest_state=true AR is not zeroed. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 319e840..77212d1 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3196,8 +3196,6 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); - if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state) - ar = 0; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; -- cgit v0.10.2 From 91b0aa2ca63e60ac69c5dd96bff580d3947073d4 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:47 +0200 Subject: KVM: VMX: rename fix_pmode_dataseg to fix_pmode_seg. The function deals with code segment too. 
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 77212d1..9bc68c7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2759,7 +2759,7 @@ static __exit void hardware_unsetup(void) free_kvm_area(); } -static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, +static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) { if (!emulate_invalid_guest_state) { @@ -2811,12 +2811,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu) update_exception_bitmap(vcpu); - fix_pmode_dataseg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); - fix_pmode_dataseg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); - fix_pmode_dataseg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); - fix_pmode_dataseg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); - fix_pmode_dataseg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); - fix_pmode_dataseg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); + fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); + fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); + fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); + fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); + fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); + fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); /* CPL is always 0 when CPU enters protected mode */ __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); -- cgit v0.10.2 From 378a8b099fc207ddcb91b19a8c1457667e0af398 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:48 +0200 Subject: KVM: x86: fix use of uninitialized memory as segment descriptor in emulator. If VMX reports segment as unusable, zero descriptor passed by the emulator before returning. Such descriptor will be considered not present by the emulator. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e00dd05..b9f5529 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4489,8 +4489,10 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); *selector = var.selector; - if (var.unusable) + if (var.unusable) { + memset(desc, 0, sizeof(*desc)); return false; + } if (var.g) var.limit >>= 12; -- cgit v0.10.2 From 141687869fb904e912568c6b94a6b1fa2114f6ed Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 21 Jan 2013 15:36:49 +0200 Subject: KVM: VMX: set vmx->emulation_required only when needed. If emulate_invalid_guest_state=false vmx->emulation_required is never actually used, but it ends up to be always set to true since handle_invalid_guest_state(), the only place it is reset back to false, is never called. This, besides been not very clean, makes vmexit and vmentry path to check emulate_invalid_guest_state needlessly. The patch fixes that by keeping emulation_required coherent with emulate_invalid_guest_state setting. 
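The coherence rule can be summarized with a small standalone sketch (invented names): the cached flag is only ever assigned from one predicate, so it cannot drift away from the emulate_invalid_guest_state setting or from the current guest state.

#include <stdio.h>
#include <stdbool.h>

static bool emulate_invalid_guest_state;	/* stands in for the module parameter */

struct vcpu {
	bool guest_state_valid;
	bool emulation_required;
};

static bool emulation_required(const struct vcpu *v)
{
	return emulate_invalid_guest_state && !v->guest_state_valid;
}

/* Every state change recomputes the flag from the predicate. */
static void set_guest_state_valid(struct vcpu *v, bool valid)
{
	v->guest_state_valid = valid;
	v->emulation_required = emulation_required(v);
}

int main(void)
{
	struct vcpu v = { .guest_state_valid = true, .emulation_required = false };

	emulate_invalid_guest_state = true;
	set_guest_state_valid(&v, false);
	printf("emulation_required = %d\n", v.emulation_required);	/* 1 */

	emulate_invalid_guest_state = false;
	set_guest_state_valid(&v, false);
	printf("emulation_required = %d\n", v.emulation_required);	/* 0 */
	return 0;
}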
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9bc68c7..02eeba8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2759,6 +2759,11 @@ static __exit void hardware_unsetup(void) free_kvm_area(); } +static bool emulation_required(struct kvm_vcpu *vcpu) +{ + return emulate_invalid_guest_state && !guest_state_valid(vcpu); +} + static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) { @@ -2794,7 +2799,6 @@ static void enter_pmode(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); - vmx->emulation_required = 1; vmx->rmode.vm86_active = 0; vmx_segment_cache_clear(vmx); @@ -2885,7 +2889,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); - vmx->emulation_required = 1; vmx->rmode.vm86_active = 1; /* @@ -3111,6 +3114,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; + + /* depends on vcpu->arch.cr0 to be set to a new value */ + vmx->emulation_required = emulation_required(vcpu); } static u64 construct_eptp(unsigned long root_hpa) @@ -3298,8 +3304,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); out: - if (!vmx->emulation_required) - vmx->emulation_required = !guest_state_valid(vcpu); + vmx->emulation_required |= emulation_required(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) @@ -5027,7 +5032,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) schedule(); } - vmx->emulation_required = !guest_state_valid(vcpu); + vmx->emulation_required = emulation_required(vcpu); out: return ret; } @@ -5970,7 +5975,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) u32 vectoring_info = vmx->idt_vectoring_info; /* If guest state is invalid, start emulating */ - if (vmx->emulation_required && emulate_invalid_guest_state) + if (vmx->emulation_required) return handle_invalid_guest_state(vcpu); /* @@ -6253,7 +6258,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ - if (vmx->emulation_required && emulate_invalid_guest_state) + if (vmx->emulation_required) return; if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) -- cgit v0.10.2 From 9445ef0181ebe0b74c3f2a23191a6f3d9a92b14b Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 18 Jan 2013 02:25:23 +0100 Subject: KVM: PPC: E500: Move write_stlbe higher Later patches want to call the function and it doesn't have dependencies on anything below write_host_tlbe. Move it higher up in the file. 
Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index cf3f180..d38ad63 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -156,6 +156,22 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, } } +/* sesel is for tlb1 only */ +static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe, + int stlbsel, int sesel) +{ + int stid; + + preempt_disable(); + stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); + + stlbe->mas1 |= MAS1_TID(stid); + write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); + preempt_enable(); +} + #ifdef CONFIG_KVM_E500V2 void kvmppc_map_magic(struct kvm_vcpu *vcpu) { @@ -834,22 +850,6 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) return EMULATE_DONE; } -/* sesel is for tlb1 only */ -static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - struct kvm_book3e_206_tlb_entry *gtlbe, - struct kvm_book3e_206_tlb_entry *stlbe, - int stlbsel, int sesel) -{ - int stid; - - preempt_disable(); - stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); - - stlbe->mas1 |= MAS1_TID(stid); - write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); - preempt_enable(); -} - int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); -- cgit v0.10.2 From 523f0e5421c12610527c620b983b443f329e3a32 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 18 Jan 2013 02:27:14 +0100 Subject: KVM: PPC: E500: Explicitly mark shadow maps invalid When we invalidate shadow TLB maps on the host, we don't mark them as not valid. But we should. Fix this by removing the E500_TLB_VALID from their flags when invalidating. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index d38ad63..8efb2ac 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -204,9 +204,13 @@ static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, { struct kvm_book3e_206_tlb_entry *gtlbe = get_entry(vcpu_e500, tlbsel, esel); + struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; - if (tlbsel == 1 && - vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) { + /* Don't bother with unmapped entries */ + if (!(ref->flags & E500_TLB_VALID)) + return; + + if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; int hw_tlb_indx; unsigned long flags; @@ -224,7 +228,7 @@ static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, } mb(); vcpu_e500->g2h_tlb1_map[esel] = 0; - vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP; + ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); local_irq_restore(flags); return; @@ -232,6 +236,9 @@ static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); + + /* Mark the TLB as not backed by the host anymore */ + ref->flags &= ~E500_TLB_VALID; } static int tlb0_set_base(gva_t addr, int sets, int ways) -- cgit v0.10.2 From 2c378fd779d2b37aed64cb44caa607707edc51d3 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 18 Jan 2013 02:31:01 +0100 Subject: KVM: PPC: E500: Propagate errors when shadow mapping When shadow mapping a page, mapping this page can fail. In that case we don't have a shadow map. Take this case into account, otherwise we might end up writing bogus TLB entries into the host TLB. 
While at it, also move the write_stlbe() calls into the respective TLBn handlers. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 8efb2ac..3777167 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -432,7 +432,7 @@ static inline void kvmppc_e500_setup_stlbe( #endif } -static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, +static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref) @@ -551,7 +551,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, if (is_error_noslot_pfn(pfn)) { printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", (long)gfn); - return; + return -EINVAL; } /* Align guest and physical address to page map boundaries */ @@ -571,22 +571,33 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, /* Drop refcount on page, so that mmu notifiers can clear it */ kvm_release_pfn_clean(pfn); + + return 0; } /* XXX only map the one-one case, for now use TLB0 */ -static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, - int esel, - struct kvm_book3e_206_tlb_entry *stlbe) +static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, + int esel, + struct kvm_book3e_206_tlb_entry *stlbe) { struct kvm_book3e_206_tlb_entry *gtlbe; struct tlbe_ref *ref; + int stlbsel = 0; + int sesel = 0; + int r; gtlbe = get_entry(vcpu_e500, 0, esel); ref = &vcpu_e500->gtlb_priv[0][esel].ref; - kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), + r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_raddr(gtlbe) >> PAGE_SHIFT, gtlbe, 0, stlbe, ref); + if (r) + return r; + + write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); + + return 0; } /* Caller must ensure that the specified guest TLB entry is safe to insert into @@ -597,25 +608,32 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { struct tlbe_ref *ref; - unsigned int victim; + unsigned int sesel; + int r; + int stlbsel = 1; - victim = vcpu_e500->host_tlb1_nv++; + sesel = vcpu_e500->host_tlb1_nv++; if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; - ref = &vcpu_e500->tlb_refs[1][victim]; - kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); + ref = &vcpu_e500->tlb_refs[1][sesel]; + r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, + ref); + if (r) + return r; - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; - if (vcpu_e500->h2g_tlb1_rmap[victim]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim]; - vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim); + if (vcpu_e500->h2g_tlb1_rmap[sesel]) { + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } - vcpu_e500->h2g_tlb1_rmap[victim] = esel; + vcpu_e500->h2g_tlb1_rmap[sesel] = esel; - return victim; + write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); + + return 0; } static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) @@ -1034,30 +1052,27 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; int tlbsel = tlbsel_of(index); int esel = 
esel_of(index); - int stlbsel, sesel; gtlbe = get_entry(vcpu_e500, tlbsel, esel); switch (tlbsel) { case 0: - stlbsel = 0; - sesel = 0; /* unused */ priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - /* Only triggers after clear_tlb_refs */ - if (unlikely(!(priv->ref.flags & E500_TLB_VALID))) + /* Triggers after clear_tlb_refs or on initial mapping */ + if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); - else + } else { kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, &priv->ref, eaddr, &stlbe); + write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); + } break; case 1: { gfn_t gfn = gpaddr >> PAGE_SHIFT; - - stlbsel = 1; - sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, - gtlbe, &stlbe, esel); + kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, + esel); break; } @@ -1065,8 +1080,6 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, BUG(); break; } - - write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); } /************* MMU Notifiers *************/ -- cgit v0.10.2 From 9d98b3ff949dab3bafa2c50856ce9e1f88497f9a Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 17 Jan 2013 19:23:28 +0100 Subject: KVM: PPC: e500: Call kvmppc_mmu_map for initial mapping When emulating tlbwe, we want to automatically map the entry that just got written in our shadow TLB map, because chances are quite high that it's going to be used very soon. Today this happens explicitly, duplicating all the logic that is in kvmppc_mmu_map() already. Just call that one instead. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 3777167..48d1a4f 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -878,8 +878,8 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; - int tlbsel, esel, stlbsel, sesel; + struct kvm_book3e_206_tlb_entry *gtlbe; + int tlbsel, esel; int recal = 0; tlbsel = get_tlb_tlbsel(vcpu); @@ -917,40 +917,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ if (tlbe_is_host_safe(vcpu, gtlbe)) { - u64 eaddr; - u64 raddr; + u64 eaddr = get_tlb_eaddr(gtlbe); + u64 raddr = get_tlb_raddr(gtlbe); - switch (tlbsel) { - case 0: - /* TLB0 */ + if (tlbsel == 0) { gtlbe->mas1 &= ~MAS1_TSIZE(~0); gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); - - stlbsel = 0; - kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); - sesel = 0; /* unused */ - - break; - - case 1: - /* TLB1 */ - eaddr = get_tlb_eaddr(gtlbe); - raddr = get_tlb_raddr(gtlbe); - - /* Create a 4KB mapping on the host. - * If the guest wanted a large page, - * only the first 4KB is mapped here and the rest - * are mapped on the fly. */ - stlbsel = 1; - sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, - raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel); - break; - - default: - BUG(); } - write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); + /* Premap the faulting page */ + kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); } kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); -- cgit v0.10.2 From b71c9e2fb72cf538aadbc59ea719639a1e2191fa Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 11 Jan 2013 15:22:45 +0100 Subject: KVM: PPC: E500: Split host and guest MMU parts This patch splits the file e500_tlb.c into e500_mmu.c (guest TLB handling) and e500_mmu_host.c (host TLB handling). 
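For orientation before the (large) diff: the interface the guest-TLB side ends up calling on the host side is fairly small. The prototypes below are condensed from the new e500_mmu_host.h added further down in this patch; the annotations are editorial and not part of the patch itself:

/* Host-side interface used by the guest-TLB code (condensed from
 * e500_mmu_host.h below; comments are editorial, not from the patch). */
struct kvmppc_vcpu_e500;				/* defined in e500.h */

void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
			 int tlbsel, int esel);		/* invalidate the host entry backing a guest entry */
void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500);	/* reset g2h/h2g TLB1 tracking */
void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500);	/* release host-side mapping references */
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);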
The main benefit of this split is readability and maintainability. It's just a lot harder to write dirty code :). Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 1e473d4..b772ede 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -10,7 +10,8 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \ eventfd.o) CFLAGS_44x_tlb.o := -I. -CFLAGS_e500_tlb.o := -I. +CFLAGS_e500_mmu.o := -I. +CFLAGS_e500_mmu_host.o := -I. CFLAGS_emulate.o := -I. common-objs-y += powerpc.o emulate.o @@ -35,7 +36,8 @@ kvm-e500-objs := \ booke_emulate.o \ booke_interrupts.o \ e500.o \ - e500_tlb.o \ + e500_mmu.o \ + e500_mmu_host.o \ e500_emulate.o kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs) @@ -45,7 +47,8 @@ kvm-e500mc-objs := \ booke_emulate.o \ bookehv_interrupts.o \ e500mc.o \ - e500_tlb.o \ + e500_mmu.o \ + e500_mmu_host.o \ e500_emulate.o kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c new file mode 100644 index 0000000..c3d1721 --- /dev/null +++ b/arch/powerpc/kvm/e500_mmu.c @@ -0,0 +1,811 @@ +/* + * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, yu.liu@freescale.com + * Scott Wood, scottwood@freescale.com + * Ashish Kalra, ashish.kalra@freescale.com + * Varun Sethi, varun.sethi@freescale.com + * Alexander Graf, agraf@suse.de + * + * Description: + * This file is based on arch/powerpc/kvm/44x_tlb.c, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e500.h" +#include "trace.h" +#include "timing.h" +#include "e500_mmu_host.h" + +static inline unsigned int gtlb0_get_next_victim( + struct kvmppc_vcpu_e500 *vcpu_e500) +{ + unsigned int victim; + + victim = vcpu_e500->gtlb_nv[0]++; + if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways)) + vcpu_e500->gtlb_nv[0] = 0; + + return victim; +} + +static int tlb0_set_base(gva_t addr, int sets, int ways) +{ + int set_base; + + set_base = (addr >> PAGE_SHIFT) & (sets - 1); + set_base *= ways; + + return set_base; +} + +static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr) +{ + return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets, + vcpu_e500->gtlb_params[0].ways); +} + +static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int esel = get_tlb_esel_bit(vcpu); + + if (tlbsel == 0) { + esel &= vcpu_e500->gtlb_params[0].ways - 1; + esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); + } else { + esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; + } + + return esel; +} + +/* Search the guest TLB for a matching entry. 
*/ +static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, + gva_t eaddr, int tlbsel, unsigned int pid, int as) +{ + int size = vcpu_e500->gtlb_params[tlbsel].entries; + unsigned int set_base, offset; + int i; + + if (tlbsel == 0) { + set_base = gtlb0_set_base(vcpu_e500, eaddr); + size = vcpu_e500->gtlb_params[0].ways; + } else { + if (eaddr < vcpu_e500->tlb1_min_eaddr || + eaddr > vcpu_e500->tlb1_max_eaddr) + return -1; + set_base = 0; + } + + offset = vcpu_e500->gtlb_offset[tlbsel]; + + for (i = 0; i < size; i++) { + struct kvm_book3e_206_tlb_entry *tlbe = + &vcpu_e500->gtlb_arch[offset + set_base + i]; + unsigned int tid; + + if (eaddr < get_tlb_eaddr(tlbe)) + continue; + + if (eaddr > get_tlb_end(tlbe)) + continue; + + tid = get_tlb_tid(tlbe); + if (tid && (tid != pid)) + continue; + + if (!get_tlb_v(tlbe)) + continue; + + if (get_tlb_ts(tlbe) != as && as != -1) + continue; + + return set_base + i; + } + + return -1; +} + +static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, + unsigned int eaddr, int as) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + unsigned int victim, tsized; + int tlbsel; + + /* since we only have two TLBs, only lower bit is used. */ + tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; + victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; + tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; + + vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) + | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); + vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) + | MAS1_TID(get_tlbmiss_tid(vcpu)) + | MAS1_TSIZE(tsized); + vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) + | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); + vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; + vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1) + | (get_cur_pid(vcpu) << 16) + | (as ? 
MAS6_SAS : 0); +} + +static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int size = vcpu_e500->gtlb_params[1].entries; + unsigned int offset; + gva_t eaddr; + int i; + + vcpu_e500->tlb1_min_eaddr = ~0UL; + vcpu_e500->tlb1_max_eaddr = 0; + offset = vcpu_e500->gtlb_offset[1]; + + for (i = 0; i < size; i++) { + struct kvm_book3e_206_tlb_entry *tlbe = + &vcpu_e500->gtlb_arch[offset + i]; + + if (!get_tlb_v(tlbe)) + continue; + + eaddr = get_tlb_eaddr(tlbe); + vcpu_e500->tlb1_min_eaddr = + min(vcpu_e500->tlb1_min_eaddr, eaddr); + + eaddr = get_tlb_end(tlbe); + vcpu_e500->tlb1_max_eaddr = + max(vcpu_e500->tlb1_max_eaddr, eaddr); + } +} + +static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, + struct kvm_book3e_206_tlb_entry *gtlbe) +{ + unsigned long start, end, size; + + size = get_tlb_bytes(gtlbe); + start = get_tlb_eaddr(gtlbe) & ~(size - 1); + end = start + size - 1; + + return vcpu_e500->tlb1_min_eaddr == start || + vcpu_e500->tlb1_max_eaddr == end; +} + +/* This function is supposed to be called for a adding a new valid tlb entry */ +static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu, + struct kvm_book3e_206_tlb_entry *gtlbe) +{ + unsigned long start, end, size; + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + + if (!get_tlb_v(gtlbe)) + return; + + size = get_tlb_bytes(gtlbe); + start = get_tlb_eaddr(gtlbe) & ~(size - 1); + end = start + size - 1; + + vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); + vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); +} + +static inline int kvmppc_e500_gtlbe_invalidate( + struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) +{ + struct kvm_book3e_206_tlb_entry *gtlbe = + get_entry(vcpu_e500, tlbsel, esel); + + if (unlikely(get_tlb_iprot(gtlbe))) + return -1; + + if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) + kvmppc_recalc_tlb1map_range(vcpu_e500); + + gtlbe->mas1 = 0; + + return 0; +} + +int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) +{ + int esel; + + if (value & MMUCSR0_TLB0FI) + for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); + if (value & MMUCSR0_TLB1FI) + for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); + + /* Invalidate all vcpu id mappings */ + kvmppc_e500_tlbil_all(vcpu_e500); + + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + unsigned int ia; + int esel, tlbsel; + + ia = (ea >> 2) & 0x1; + + /* since we only have two TLBs, only lower bit is used. 
*/ + tlbsel = (ea >> 3) & 0x1; + + if (ia) { + /* invalidate all entries */ + for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; + esel++) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); + } else { + ea &= 0xfffff000; + esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, + get_cur_pid(vcpu), -1); + if (esel >= 0) + kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); + } + + /* Invalidate all vcpu id mappings */ + kvmppc_e500_tlbil_all(vcpu_e500); + + return EMULATE_DONE; +} + +static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, + int pid, int type) +{ + struct kvm_book3e_206_tlb_entry *tlbe; + int tid, esel; + + /* invalidate all entries */ + for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { + tlbe = get_entry(vcpu_e500, tlbsel, esel); + tid = get_tlb_tid(tlbe); + if (type == 0 || tid == pid) { + inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); + kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); + } + } +} + +static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, + gva_t ea) +{ + int tlbsel, esel; + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); + if (esel >= 0) { + inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); + kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); + break; + } + } +} + +int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int pid = get_cur_spid(vcpu); + + if (type == 0 || type == 1) { + tlbilx_all(vcpu_e500, 0, pid, type); + tlbilx_all(vcpu_e500, 1, pid, type); + } else if (type == 3) { + tlbilx_one(vcpu_e500, pid, ea); + } + + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int tlbsel, esel; + struct kvm_book3e_206_tlb_entry *gtlbe; + + tlbsel = get_tlb_tlbsel(vcpu); + esel = get_tlb_esel(vcpu, tlbsel); + + gtlbe = get_entry(vcpu_e500, tlbsel, esel); + vcpu->arch.shared->mas0 &= ~MAS0_NV(~0); + vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); + vcpu->arch.shared->mas1 = gtlbe->mas1; + vcpu->arch.shared->mas2 = gtlbe->mas2; + vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; + + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int as = !!get_cur_sas(vcpu); + unsigned int pid = get_cur_spid(vcpu); + int esel, tlbsel; + struct kvm_book3e_206_tlb_entry *gtlbe = NULL; + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); + if (esel >= 0) { + gtlbe = get_entry(vcpu_e500, tlbsel, esel); + break; + } + } + + if (gtlbe) { + esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1; + + vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) + | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); + vcpu->arch.shared->mas1 = gtlbe->mas1; + vcpu->arch.shared->mas2 = gtlbe->mas2; + vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; + } else { + int victim; + + /* since we only have two TLBs, only lower bit is used. */ + tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1; + victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; + + vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) + | MAS0_ESEL(victim) + | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); + vcpu->arch.shared->mas1 = + (vcpu->arch.shared->mas6 & MAS6_SPID0) + | (vcpu->arch.shared->mas6 & (MAS6_SAS ? 
MAS1_TS : 0)) + | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0)); + vcpu->arch.shared->mas2 &= MAS2_EPN; + vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 & + MAS2_ATTRIB_MASK; + vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | + MAS3_U2 | MAS3_U3; + } + + kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); + return EMULATE_DONE; +} + +int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct kvm_book3e_206_tlb_entry *gtlbe; + int tlbsel, esel; + int recal = 0; + + tlbsel = get_tlb_tlbsel(vcpu); + esel = get_tlb_esel(vcpu, tlbsel); + + gtlbe = get_entry(vcpu_e500, tlbsel, esel); + + if (get_tlb_v(gtlbe)) { + inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); + if ((tlbsel == 1) && + kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) + recal = 1; + } + + gtlbe->mas1 = vcpu->arch.shared->mas1; + gtlbe->mas2 = vcpu->arch.shared->mas2; + if (!(vcpu->arch.shared->msr & MSR_CM)) + gtlbe->mas2 &= 0xffffffffUL; + gtlbe->mas7_3 = vcpu->arch.shared->mas7_3; + + trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, + gtlbe->mas2, gtlbe->mas7_3); + + if (tlbsel == 1) { + /* + * If a valid tlb1 entry is overwritten then recalculate the + * min/max TLB1 map address range otherwise no need to look + * in tlb1 array. + */ + if (recal) + kvmppc_recalc_tlb1map_range(vcpu_e500); + else + kvmppc_set_tlb1map_range(vcpu, gtlbe); + } + + /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ + if (tlbe_is_host_safe(vcpu, gtlbe)) { + u64 eaddr = get_tlb_eaddr(gtlbe); + u64 raddr = get_tlb_raddr(gtlbe); + + if (tlbsel == 0) { + gtlbe->mas1 &= ~MAS1_TSIZE(~0); + gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); + } + + /* Premap the faulting page */ + kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); + } + + kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); + return EMULATE_DONE; +} + +static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, + gva_t eaddr, unsigned int pid, int as) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int esel, tlbsel; + + for (tlbsel = 0; tlbsel < 2; tlbsel++) { + esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); + if (esel >= 0) + return index_of(tlbsel, esel); + } + + return -1; +} + +/* 'linear_address' is actually an encoding of AS|PID|EADDR . */ +int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + int index; + gva_t eaddr; + u8 pid; + u8 as; + + eaddr = tr->linear_address; + pid = (tr->linear_address >> 32) & 0xff; + as = (tr->linear_address >> 40) & 0x1; + + index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); + if (index < 0) { + tr->valid = 0; + return 0; + } + + tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); + /* XXX what does "writeable" and "usermode" even mean? 
*/ + tr->valid = 1; + + return 0; +} + + +int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) +{ + unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); + + return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); +} + +int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) +{ + unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); + + return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); +} + +void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) +{ + unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); + + kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); +} + +void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) +{ + unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); + + kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); +} + +gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, + gva_t eaddr) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct kvm_book3e_206_tlb_entry *gtlbe; + u64 pgmask; + + gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index)); + pgmask = get_tlb_bytes(gtlbe) - 1; + + return get_tlb_raddr(gtlbe) | (eaddr & pgmask); +} + +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +{ +} + +/*****************************************/ + +static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int i; + + clear_tlb1_bitmap(vcpu_e500); + kfree(vcpu_e500->g2h_tlb1_map); + + clear_tlb_refs(vcpu_e500); + kfree(vcpu_e500->gtlb_priv[0]); + kfree(vcpu_e500->gtlb_priv[1]); + + if (vcpu_e500->shared_tlb_pages) { + vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch, + PAGE_SIZE))); + + for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) { + set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]); + put_page(vcpu_e500->shared_tlb_pages[i]); + } + + vcpu_e500->num_shared_tlb_pages = 0; + + kfree(vcpu_e500->shared_tlb_pages); + vcpu_e500->shared_tlb_pages = NULL; + } else { + kfree(vcpu_e500->gtlb_arch); + } + + vcpu_e500->gtlb_arch = NULL; +} + +void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + sregs->u.e.mas0 = vcpu->arch.shared->mas0; + sregs->u.e.mas1 = vcpu->arch.shared->mas1; + sregs->u.e.mas2 = vcpu->arch.shared->mas2; + sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; + sregs->u.e.mas4 = vcpu->arch.shared->mas4; + sregs->u.e.mas6 = vcpu->arch.shared->mas6; + + sregs->u.e.mmucfg = vcpu->arch.mmucfg; + sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; + sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; + sregs->u.e.tlbcfg[2] = 0; + sregs->u.e.tlbcfg[3] = 0; +} + +int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { + vcpu->arch.shared->mas0 = sregs->u.e.mas0; + vcpu->arch.shared->mas1 = sregs->u.e.mas1; + vcpu->arch.shared->mas2 = sregs->u.e.mas2; + vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; + vcpu->arch.shared->mas4 = sregs->u.e.mas4; + vcpu->arch.shared->mas6 = sregs->u.e.mas6; + } + + return 0; +} + +int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, + struct kvm_config_tlb *cfg) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct kvm_book3e_206_tlb_params params; + char *virt; + struct page **pages; + struct tlbe_priv *privs[2] = {}; + u64 *g2h_bitmap = NULL; + size_t array_len; + u32 sets; + int num_pages, ret, i; + + if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)cfg->params, + sizeof(params))) + return -EFAULT; + + if (params.tlb_sizes[1] > 64) + return -EINVAL; + if (params.tlb_ways[1] != 
params.tlb_sizes[1]) + return -EINVAL; + if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0) + return -EINVAL; + if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0) + return -EINVAL; + + if (!is_power_of_2(params.tlb_ways[0])) + return -EINVAL; + + sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]); + if (!is_power_of_2(sets)) + return -EINVAL; + + array_len = params.tlb_sizes[0] + params.tlb_sizes[1]; + array_len *= sizeof(struct kvm_book3e_206_tlb_entry); + + if (cfg->array_len < array_len) + return -EINVAL; + + num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) - + cfg->array / PAGE_SIZE; + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); + if (!pages) + return -ENOMEM; + + ret = get_user_pages_fast(cfg->array, num_pages, 1, pages); + if (ret < 0) + goto err_pages; + + if (ret != num_pages) { + num_pages = ret; + ret = -EFAULT; + goto err_put_page; + } + + virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); + if (!virt) { + ret = -ENOMEM; + goto err_put_page; + } + + privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0], + GFP_KERNEL); + privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1], + GFP_KERNEL); + + if (!privs[0] || !privs[1]) { + ret = -ENOMEM; + goto err_privs; + } + + g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1], + GFP_KERNEL); + if (!g2h_bitmap) { + ret = -ENOMEM; + goto err_privs; + } + + free_gtlb(vcpu_e500); + + vcpu_e500->gtlb_priv[0] = privs[0]; + vcpu_e500->gtlb_priv[1] = privs[1]; + vcpu_e500->g2h_tlb1_map = g2h_bitmap; + + vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) + (virt + (cfg->array & (PAGE_SIZE - 1))); + + vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; + vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; + + vcpu_e500->gtlb_offset[0] = 0; + vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; + + vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; + + vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + if (params.tlb_sizes[0] <= 2048) + vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0]; + vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; + + vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1]; + vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; + + vcpu_e500->shared_tlb_pages = pages; + vcpu_e500->num_shared_tlb_pages = num_pages; + + vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0]; + vcpu_e500->gtlb_params[0].sets = sets; + + vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; + vcpu_e500->gtlb_params[1].sets = 1; + + kvmppc_recalc_tlb1map_range(vcpu_e500); + return 0; + +err_privs: + kfree(privs[0]); + kfree(privs[1]); + +err_put_page: + for (i = 0; i < num_pages; i++) + put_page(pages[i]); + +err_pages: + kfree(pages); + return ret; +} + +int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, + struct kvm_dirty_tlb *dirty) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + kvmppc_recalc_tlb1map_range(vcpu_e500); + clear_tlb_refs(vcpu_e500); + return 0; +} + +int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; + int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); + int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; + + if (e500_mmu_host_init(vcpu_e500)) + goto err; + + vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; + vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; + + vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM; + vcpu_e500->gtlb_params[0].sets = + 
KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM; + + vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE; + vcpu_e500->gtlb_params[1].sets = 1; + + vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL); + if (!vcpu_e500->gtlb_arch) + return -ENOMEM; + + vcpu_e500->gtlb_offset[0] = 0; + vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; + + vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) * + vcpu_e500->gtlb_params[0].entries, + GFP_KERNEL); + if (!vcpu_e500->gtlb_priv[0]) + goto err; + + vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) * + vcpu_e500->gtlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->gtlb_priv[1]) + goto err; + + vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) * + vcpu_e500->gtlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->g2h_tlb1_map) + goto err; + + /* Init TLB configuration register */ + vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & + ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries; + vcpu->arch.tlbcfg[0] |= + vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; + + vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & + ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries; + vcpu->arch.tlbcfg[1] |= + vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; + + kvmppc_recalc_tlb1map_range(vcpu_e500); + return 0; + +err: + free_gtlb(vcpu_e500); + return -1; +} + +void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + free_gtlb(vcpu_e500); + e500_mmu_host_uninit(vcpu_e500); +} diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c new file mode 100644 index 0000000..4c32d65 --- /dev/null +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -0,0 +1,672 @@ +/* + * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, yu.liu@freescale.com + * Scott Wood, scottwood@freescale.com + * Ashish Kalra, ashish.kalra@freescale.com + * Varun Sethi, varun.sethi@freescale.com + * Alexander Graf, agraf@suse.de + * + * Description: + * This file is based on arch/powerpc/kvm/44x_tlb.c, + * by Hollis Blanchard . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e500.h" +#include "trace.h" +#include "timing.h" +#include "e500_mmu_host.h" + +#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) + +static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; + +static inline unsigned int tlb1_max_shadow_size(void) +{ + /* reserve one entry for magic page */ + return host_tlb_params[1].entries - tlbcam_index - 1; +} + +static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) +{ + /* Mask off reserved bits. */ + mas3 &= MAS3_ATTRIB_MASK; + +#ifndef CONFIG_KVM_BOOKE_HV + if (!usermode) { + /* Guest is in supervisor mode, + * so we need to translate guest + * supervisor permissions into user permissions. 
*/ + mas3 &= ~E500_TLB_USER_PERM_MASK; + mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; + } + mas3 |= E500_TLB_SUPER_PERM_MASK; +#endif + return mas3; +} + +static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) +{ +#ifdef CONFIG_SMP + return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; +#else + return mas2 & MAS2_ATTRIB_MASK; +#endif +} + +/* + * writing shadow tlb entry to host TLB + */ +static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, + uint32_t mas0) +{ + unsigned long flags; + + local_irq_save(flags); + mtspr(SPRN_MAS0, mas0); + mtspr(SPRN_MAS1, stlbe->mas1); + mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); + mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); + mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); +#ifdef CONFIG_KVM_BOOKE_HV + mtspr(SPRN_MAS8, stlbe->mas8); +#endif + asm volatile("isync; tlbwe" : : : "memory"); + +#ifdef CONFIG_KVM_BOOKE_HV + /* Must clear mas8 for other host tlbwe's */ + mtspr(SPRN_MAS8, 0); + isync(); +#endif + local_irq_restore(flags); + + trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, + stlbe->mas2, stlbe->mas7_3); +} + +/* + * Acquire a mas0 with victim hint, as if we just took a TLB miss. + * + * We don't care about the address we're searching for, other than that it's + * in the right set and is not present in the TLB. Using a zero PID and a + * userspace address means we don't have to set and then restore MAS5, or + * calculate a proper MAS6 value. + */ +static u32 get_host_mas0(unsigned long eaddr) +{ + unsigned long flags; + u32 mas0; + + local_irq_save(flags); + mtspr(SPRN_MAS6, 0); + asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); + mas0 = mfspr(SPRN_MAS0); + local_irq_restore(flags); + + return mas0; +} + +/* sesel is for tlb1 only */ +static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) +{ + u32 mas0; + + if (tlbsel == 0) { + mas0 = get_host_mas0(stlbe->mas2); + __write_host_tlbe(stlbe, mas0); + } else { + __write_host_tlbe(stlbe, + MAS0_TLBSEL(1) | + MAS0_ESEL(to_htlb1_esel(sesel))); + } +} + +/* sesel is for tlb1 only */ +static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe, + int stlbsel, int sesel) +{ + int stid; + + preempt_disable(); + stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); + + stlbe->mas1 |= MAS1_TID(stid); + write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); + preempt_enable(); +} + +#ifdef CONFIG_KVM_E500V2 +/* XXX should be a hook in the gva2hpa translation */ +void kvmppc_map_magic(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct kvm_book3e_206_tlb_entry magic; + ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; + unsigned int stid; + pfn_t pfn; + + pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; + get_page(pfn_to_page(pfn)); + + preempt_disable(); + stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); + + magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | + MAS1_TSIZE(BOOK3E_PAGESZ_4K); + magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; + magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | + MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; + magic.mas8 = 0; + + __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); + preempt_enable(); +} +#endif + +void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, + int esel) +{ + struct kvm_book3e_206_tlb_entry *gtlbe = + get_entry(vcpu_e500, tlbsel, esel); + struct tlbe_ref *ref = 
&vcpu_e500->gtlb_priv[tlbsel][esel].ref; + + /* Don't bother with unmapped entries */ + if (!(ref->flags & E500_TLB_VALID)) + return; + + if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { + u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; + int hw_tlb_indx; + unsigned long flags; + + local_irq_save(flags); + while (tmp) { + hw_tlb_indx = __ilog2_u64(tmp & -tmp); + mtspr(SPRN_MAS0, + MAS0_TLBSEL(1) | + MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); + mtspr(SPRN_MAS1, 0); + asm volatile("tlbwe"); + vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; + tmp &= tmp - 1; + } + mb(); + vcpu_e500->g2h_tlb1_map[esel] = 0; + ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); + local_irq_restore(flags); + + return; + } + + /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ + kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); + + /* Mark the TLB as not backed by the host anymore */ + ref->flags &= ~E500_TLB_VALID; +} + +static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) +{ + return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); +} + +static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, + struct kvm_book3e_206_tlb_entry *gtlbe, + pfn_t pfn) +{ + ref->pfn = pfn; + ref->flags = E500_TLB_VALID; + + if (tlbe_is_writable(gtlbe)) + kvm_set_pfn_dirty(pfn); +} + +static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) +{ + if (ref->flags & E500_TLB_VALID) { + trace_kvm_booke206_ref_release(ref->pfn, ref->flags); + ref->flags = 0; + } +} + +void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + if (vcpu_e500->g2h_tlb1_map) + memset(vcpu_e500->g2h_tlb1_map, 0, + sizeof(u64) * vcpu_e500->gtlb_params[1].entries); + if (vcpu_e500->h2g_tlb1_rmap) + memset(vcpu_e500->h2g_tlb1_rmap, 0, + sizeof(unsigned int) * host_tlb_params[1].entries); +} + +static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int tlbsel = 0; + int i; + + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } +} + +void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int stlbsel = 1; + int i; + + kvmppc_e500_tlbil_all(vcpu_e500); + + for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->tlb_refs[stlbsel][i]; + kvmppc_e500_ref_release(ref); + } + + clear_tlb_privs(vcpu_e500); +} + +void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + clear_tlb_refs(vcpu_e500); + clear_tlb1_bitmap(vcpu_e500); +} + +/* TID must be supplied by the caller */ +static void kvmppc_e500_setup_stlbe( + struct kvm_vcpu *vcpu, + struct kvm_book3e_206_tlb_entry *gtlbe, + int tsize, struct tlbe_ref *ref, u64 gvaddr, + struct kvm_book3e_206_tlb_entry *stlbe) +{ + pfn_t pfn = ref->pfn; + u32 pr = vcpu->arch.shared->msr & MSR_PR; + + BUG_ON(!(ref->flags & E500_TLB_VALID)); + + /* Force IPROT=0 for all guest mappings. 
*/ + stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; + stlbe->mas2 = (gvaddr & MAS2_EPN) | + e500_shadow_mas2_attrib(gtlbe->mas2, pr); + stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | + e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); + +#ifdef CONFIG_KVM_BOOKE_HV + stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; +#endif +} + +static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, + u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, + int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, + struct tlbe_ref *ref) +{ + struct kvm_memory_slot *slot; + unsigned long pfn = 0; /* silence GCC warning */ + unsigned long hva; + int pfnmap = 0; + int tsize = BOOK3E_PAGESZ_4K; + + /* + * Translate guest physical to true physical, acquiring + * a page reference if it is normal, non-reserved memory. + * + * gfn_to_memslot() must succeed because otherwise we wouldn't + * have gotten this far. Eventually we should just pass the slot + * pointer through from the first lookup. + */ + slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); + hva = gfn_to_hva_memslot(slot, gfn); + + if (tlbsel == 1) { + struct vm_area_struct *vma; + down_read(¤t->mm->mmap_sem); + + vma = find_vma(current->mm, hva); + if (vma && hva >= vma->vm_start && + (vma->vm_flags & VM_PFNMAP)) { + /* + * This VMA is a physically contiguous region (e.g. + * /dev/mem) that bypasses normal Linux page + * management. Find the overlap between the + * vma and the memslot. + */ + + unsigned long start, end; + unsigned long slot_start, slot_end; + + pfnmap = 1; + + start = vma->vm_pgoff; + end = start + + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); + + pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); + + slot_start = pfn - (gfn - slot->base_gfn); + slot_end = slot_start + slot->npages; + + if (start < slot_start) + start = slot_start; + if (end > slot_end) + end = slot_end; + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. + */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); + + /* + * Now find the largest tsize (up to what the guest + * requested) that will cover gfn, stay within the + * range, and for which gfn and pfn are mutually + * aligned. + */ + + for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { + unsigned long gfn_start, gfn_end, tsize_pages; + tsize_pages = 1 << (tsize - 2); + + gfn_start = gfn & ~(tsize_pages - 1); + gfn_end = gfn_start + tsize_pages; + + if (gfn_start + pfn - gfn < start) + continue; + if (gfn_end + pfn - gfn > end) + continue; + if ((gfn & (tsize_pages - 1)) != + (pfn & (tsize_pages - 1))) + continue; + + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + pfn &= ~(tsize_pages - 1); + break; + } + } else if (vma && hva >= vma->vm_start && + (vma->vm_flags & VM_HUGETLB)) { + unsigned long psize = vma_kernel_pagesize(vma); + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * Take the largest page size that satisfies both host + * and guest mapping + */ + tsize = min(__ilog2(psize) - 10, tsize); + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. 
+ */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); + } + + up_read(¤t->mm->mmap_sem); + } + + if (likely(!pfnmap)) { + unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); + pfn = gfn_to_pfn_memslot(slot, gfn); + if (is_error_noslot_pfn(pfn)) { + printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", + (long)gfn); + return -EINVAL; + } + + /* Align guest and physical address to page map boundaries */ + pfn &= ~(tsize_pages - 1); + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + } + + /* Drop old ref and setup new one. */ + kvmppc_e500_ref_release(ref); + kvmppc_e500_ref_setup(ref, gtlbe, pfn); + + kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, + ref, gvaddr, stlbe); + + /* Clear i-cache for new pages */ + kvmppc_mmu_flush_icache(pfn); + + /* Drop refcount on page, so that mmu notifiers can clear it */ + kvm_release_pfn_clean(pfn); + + return 0; +} + +/* XXX only map the one-one case, for now use TLB0 */ +static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, + struct kvm_book3e_206_tlb_entry *stlbe) +{ + struct kvm_book3e_206_tlb_entry *gtlbe; + struct tlbe_ref *ref; + int stlbsel = 0; + int sesel = 0; + int r; + + gtlbe = get_entry(vcpu_e500, 0, esel); + ref = &vcpu_e500->gtlb_priv[0][esel].ref; + + r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), + get_tlb_raddr(gtlbe) >> PAGE_SHIFT, + gtlbe, 0, stlbe, ref); + if (r) + return r; + + write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); + + return 0; +} + +/* Caller must ensure that the specified guest TLB entry is safe to insert into + * the shadow TLB. */ +/* XXX for both one-one and one-to-many , for now use TLB1 */ +static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, + u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe, int esel) +{ + struct tlbe_ref *ref; + unsigned int sesel; + int r; + int stlbsel = 1; + + sesel = vcpu_e500->host_tlb1_nv++; + + if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) + vcpu_e500->host_tlb1_nv = 0; + + ref = &vcpu_e500->tlb_refs[1][sesel]; + r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, + ref); + if (r) + return r; + + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + if (vcpu_e500->h2g_tlb1_rmap[sesel]) { + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); + } + vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + + write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); + + return 0; +} + +void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, + unsigned int index) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct tlbe_priv *priv; + struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; + int tlbsel = tlbsel_of(index); + int esel = esel_of(index); + + gtlbe = get_entry(vcpu_e500, tlbsel, esel); + + switch (tlbsel) { + case 0: + priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; + + /* Triggers after clear_tlb_refs or on initial mapping */ + if (!(priv->ref.flags & E500_TLB_VALID)) { + kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); + } else { + kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, + &priv->ref, eaddr, &stlbe); + write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); + } + break; + + case 1: { + gfn_t gfn = gpaddr >> PAGE_SHIFT; + kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, + esel); + break; + } + + default: + BUG(); + break; + } +} + +/************* MMU Notifiers *************/ + +int kvm_unmap_hva(struct kvm 
*kvm, unsigned long hva) +{ + trace_kvm_unmap_hva(hva); + + /* + * Flush all shadow tlb entries everywhere. This is slow, but + * we are 100% sure that we catch the to be unmapped page + */ + kvm_flush_remote_tlbs(kvm); + + return 0; +} + +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +{ + /* kvm_unmap_hva flushes everything anyways */ + kvm_unmap_hva(kvm, start); + + return 0; +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + /* XXX could be more clever ;) */ + return 0; +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + /* XXX could be more clever ;) */ + return 0; +} + +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + /* The page will get remapped properly on its next fault */ + kvm_unmap_hva(kvm, hva); +} + +/*****************************************/ + +int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; + host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + + /* + * This should never happen on real e500 hardware, but is + * architecturally possible -- e.g. in some weird nested + * virtualization case. + */ + if (host_tlb_params[0].entries == 0 || + host_tlb_params[1].entries == 0) { + pr_err("%s: need to know host tlb size\n", __func__); + return -ENODEV; + } + + host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> + TLBnCFG_ASSOC_SHIFT; + host_tlb_params[1].ways = host_tlb_params[1].entries; + + if (!is_power_of_2(host_tlb_params[0].entries) || + !is_power_of_2(host_tlb_params[0].ways) || + host_tlb_params[0].entries < host_tlb_params[0].ways || + host_tlb_params[0].ways == 0) { + pr_err("%s: bad tlb0 host config: %u entries %u ways\n", + __func__, host_tlb_params[0].entries, + host_tlb_params[0].ways); + return -ENODEV; + } + + host_tlb_params[0].sets = + host_tlb_params[0].entries / host_tlb_params[0].ways; + host_tlb_params[1].sets = 1; + + vcpu_e500->tlb_refs[0] = + kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, + GFP_KERNEL); + if (!vcpu_e500->tlb_refs[0]) + goto err; + + vcpu_e500->tlb_refs[1] = + kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->tlb_refs[1]) + goto err; + + vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * + host_tlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->h2g_tlb1_rmap) + goto err; + + return 0; + +err: + kfree(vcpu_e500->tlb_refs[0]); + kfree(vcpu_e500->tlb_refs[1]); + return -EINVAL; +} + +void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + kfree(vcpu_e500->h2g_tlb1_rmap); + kfree(vcpu_e500->tlb_refs[0]); + kfree(vcpu_e500->tlb_refs[1]); +} diff --git a/arch/powerpc/kvm/e500_mmu_host.h b/arch/powerpc/kvm/e500_mmu_host.h new file mode 100644 index 0000000..9e4d4a2 --- /dev/null +++ b/arch/powerpc/kvm/e500_mmu_host.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ */ + +#ifndef KVM_E500_MMU_HOST_H +#define KVM_E500_MMU_HOST_H + +void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, + int esel); + +void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500); +void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500); +int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500); +void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); + +#endif /* KVM_E500_MMU_HOST_H */ diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c deleted file mode 100644 index 48d1a4f..0000000 --- a/arch/powerpc/kvm/e500_tlb.c +++ /dev/null @@ -1,1426 +0,0 @@ -/* - * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: Yu Liu, yu.liu@freescale.com - * Scott Wood, scottwood@freescale.com - * Ashish Kalra, ashish.kalra@freescale.com - * Varun Sethi, varun.sethi@freescale.com - * - * Description: - * This file is based on arch/powerpc/kvm/44x_tlb.c, - * by Hollis Blanchard . - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "e500.h" -#include "trace.h" -#include "timing.h" - -#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) - -static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; - -static inline unsigned int gtlb0_get_next_victim( - struct kvmppc_vcpu_e500 *vcpu_e500) -{ - unsigned int victim; - - victim = vcpu_e500->gtlb_nv[0]++; - if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways)) - vcpu_e500->gtlb_nv[0] = 0; - - return victim; -} - -static inline unsigned int tlb1_max_shadow_size(void) -{ - /* reserve one entry for magic page */ - return host_tlb_params[1].entries - tlbcam_index - 1; -} - -static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) -{ - return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); -} - -static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) -{ - /* Mask off reserved bits. */ - mas3 &= MAS3_ATTRIB_MASK; - -#ifndef CONFIG_KVM_BOOKE_HV - if (!usermode) { - /* Guest is in supervisor mode, - * so we need to translate guest - * supervisor permissions into user permissions. 
*/ - mas3 &= ~E500_TLB_USER_PERM_MASK; - mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; - } - mas3 |= E500_TLB_SUPER_PERM_MASK; -#endif - return mas3; -} - -static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) -{ -#ifdef CONFIG_SMP - return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; -#else - return mas2 & MAS2_ATTRIB_MASK; -#endif -} - -/* - * writing shadow tlb entry to host TLB - */ -static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, - uint32_t mas0) -{ - unsigned long flags; - - local_irq_save(flags); - mtspr(SPRN_MAS0, mas0); - mtspr(SPRN_MAS1, stlbe->mas1); - mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); - mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); - mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); -#ifdef CONFIG_KVM_BOOKE_HV - mtspr(SPRN_MAS8, stlbe->mas8); -#endif - asm volatile("isync; tlbwe" : : : "memory"); - -#ifdef CONFIG_KVM_BOOKE_HV - /* Must clear mas8 for other host tlbwe's */ - mtspr(SPRN_MAS8, 0); - isync(); -#endif - local_irq_restore(flags); - - trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, - stlbe->mas2, stlbe->mas7_3); -} - -/* - * Acquire a mas0 with victim hint, as if we just took a TLB miss. - * - * We don't care about the address we're searching for, other than that it's - * in the right set and is not present in the TLB. Using a zero PID and a - * userspace address means we don't have to set and then restore MAS5, or - * calculate a proper MAS6 value. - */ -static u32 get_host_mas0(unsigned long eaddr) -{ - unsigned long flags; - u32 mas0; - - local_irq_save(flags); - mtspr(SPRN_MAS6, 0); - asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); - mas0 = mfspr(SPRN_MAS0); - local_irq_restore(flags); - - return mas0; -} - -/* sesel is for tlb1 only */ -static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) -{ - u32 mas0; - - if (tlbsel == 0) { - mas0 = get_host_mas0(stlbe->mas2); - __write_host_tlbe(stlbe, mas0); - } else { - __write_host_tlbe(stlbe, - MAS0_TLBSEL(1) | - MAS0_ESEL(to_htlb1_esel(sesel))); - } -} - -/* sesel is for tlb1 only */ -static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - struct kvm_book3e_206_tlb_entry *gtlbe, - struct kvm_book3e_206_tlb_entry *stlbe, - int stlbsel, int sesel) -{ - int stid; - - preempt_disable(); - stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); - - stlbe->mas1 |= MAS1_TID(stid); - write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); - preempt_enable(); -} - -#ifdef CONFIG_KVM_E500V2 -void kvmppc_map_magic(struct kvm_vcpu *vcpu) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_entry magic; - ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; - unsigned int stid; - pfn_t pfn; - - pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; - get_page(pfn_to_page(pfn)); - - preempt_disable(); - stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); - - magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | - MAS1_TSIZE(BOOK3E_PAGESZ_4K); - magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; - magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | - MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; - magic.mas8 = 0; - - __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); - preempt_enable(); -} -#endif - -static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int esel) -{ - struct kvm_book3e_206_tlb_entry *gtlbe = - get_entry(vcpu_e500, tlbsel, esel); - struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; - - /* 
Don't bother with unmapped entries */ - if (!(ref->flags & E500_TLB_VALID)) - return; - - if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { - u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; - int hw_tlb_indx; - unsigned long flags; - - local_irq_save(flags); - while (tmp) { - hw_tlb_indx = __ilog2_u64(tmp & -tmp); - mtspr(SPRN_MAS0, - MAS0_TLBSEL(1) | - MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); - mtspr(SPRN_MAS1, 0); - asm volatile("tlbwe"); - vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; - tmp &= tmp - 1; - } - mb(); - vcpu_e500->g2h_tlb1_map[esel] = 0; - ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); - local_irq_restore(flags); - - return; - } - - /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ - kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); - - /* Mark the TLB as not backed by the host anymore */ - ref->flags &= ~E500_TLB_VALID; -} - -static int tlb0_set_base(gva_t addr, int sets, int ways) -{ - int set_base; - - set_base = (addr >> PAGE_SHIFT) & (sets - 1); - set_base *= ways; - - return set_base; -} - -static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr) -{ - return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets, - vcpu_e500->gtlb_params[0].ways); -} - -static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - int esel = get_tlb_esel_bit(vcpu); - - if (tlbsel == 0) { - esel &= vcpu_e500->gtlb_params[0].ways - 1; - esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); - } else { - esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; - } - - return esel; -} - -/* Search the guest TLB for a matching entry. */ -static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, - gva_t eaddr, int tlbsel, unsigned int pid, int as) -{ - int size = vcpu_e500->gtlb_params[tlbsel].entries; - unsigned int set_base, offset; - int i; - - if (tlbsel == 0) { - set_base = gtlb0_set_base(vcpu_e500, eaddr); - size = vcpu_e500->gtlb_params[0].ways; - } else { - if (eaddr < vcpu_e500->tlb1_min_eaddr || - eaddr > vcpu_e500->tlb1_max_eaddr) - return -1; - set_base = 0; - } - - offset = vcpu_e500->gtlb_offset[tlbsel]; - - for (i = 0; i < size; i++) { - struct kvm_book3e_206_tlb_entry *tlbe = - &vcpu_e500->gtlb_arch[offset + set_base + i]; - unsigned int tid; - - if (eaddr < get_tlb_eaddr(tlbe)) - continue; - - if (eaddr > get_tlb_end(tlbe)) - continue; - - tid = get_tlb_tid(tlbe); - if (tid && (tid != pid)) - continue; - - if (!get_tlb_v(tlbe)) - continue; - - if (get_tlb_ts(tlbe) != as && as != -1) - continue; - - return set_base + i; - } - - return -1; -} - -static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, - struct kvm_book3e_206_tlb_entry *gtlbe, - pfn_t pfn) -{ - ref->pfn = pfn; - ref->flags = E500_TLB_VALID; - - if (tlbe_is_writable(gtlbe)) - kvm_set_pfn_dirty(pfn); -} - -static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) -{ - if (ref->flags & E500_TLB_VALID) { - trace_kvm_booke206_ref_release(ref->pfn, ref->flags); - ref->flags = 0; - } -} - -static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - if (vcpu_e500->g2h_tlb1_map) - memset(vcpu_e500->g2h_tlb1_map, 0, - sizeof(u64) * vcpu_e500->gtlb_params[1].entries); - if (vcpu_e500->h2g_tlb1_rmap) - memset(vcpu_e500->h2g_tlb1_rmap, 0, - sizeof(unsigned int) * host_tlb_params[1].entries); -} - -static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int tlbsel = 0; - int i; - - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { - struct tlbe_ref *ref = - 
&vcpu_e500->gtlb_priv[tlbsel][i].ref; - kvmppc_e500_ref_release(ref); - } -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int stlbsel = 1; - int i; - - kvmppc_e500_tlbil_all(vcpu_e500); - - for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->tlb_refs[stlbsel][i]; - kvmppc_e500_ref_release(ref); - } - - clear_tlb_privs(vcpu_e500); -} - -void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - clear_tlb_refs(vcpu_e500); - clear_tlb1_bitmap(vcpu_e500); -} - -static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, - unsigned int eaddr, int as) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - unsigned int victim, tsized; - int tlbsel; - - /* since we only have two TLBs, only lower bit is used. */ - tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; - victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; - tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; - - vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) - | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) - | MAS1_TID(get_tlbmiss_tid(vcpu)) - | MAS1_TSIZE(tsized); - vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) - | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); - vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; - vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1) - | (get_cur_pid(vcpu) << 16) - | (as ? MAS6_SAS : 0); -} - -/* TID must be supplied by the caller */ -static inline void kvmppc_e500_setup_stlbe( - struct kvm_vcpu *vcpu, - struct kvm_book3e_206_tlb_entry *gtlbe, - int tsize, struct tlbe_ref *ref, u64 gvaddr, - struct kvm_book3e_206_tlb_entry *stlbe) -{ - pfn_t pfn = ref->pfn; - u32 pr = vcpu->arch.shared->msr & MSR_PR; - - BUG_ON(!(ref->flags & E500_TLB_VALID)); - - /* Force IPROT=0 for all guest mappings. */ - stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; - stlbe->mas2 = (gvaddr & MAS2_EPN) | - e500_shadow_mas2_attrib(gtlbe->mas2, pr); - stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | - e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); - -#ifdef CONFIG_KVM_BOOKE_HV - stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; -#endif -} - -static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, - u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, - int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, - struct tlbe_ref *ref) -{ - struct kvm_memory_slot *slot; - unsigned long pfn = 0; /* silence GCC warning */ - unsigned long hva; - int pfnmap = 0; - int tsize = BOOK3E_PAGESZ_4K; - - /* - * Translate guest physical to true physical, acquiring - * a page reference if it is normal, non-reserved memory. - * - * gfn_to_memslot() must succeed because otherwise we wouldn't - * have gotten this far. Eventually we should just pass the slot - * pointer through from the first lookup. - */ - slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); - hva = gfn_to_hva_memslot(slot, gfn); - - if (tlbsel == 1) { - struct vm_area_struct *vma; - down_read(¤t->mm->mmap_sem); - - vma = find_vma(current->mm, hva); - if (vma && hva >= vma->vm_start && - (vma->vm_flags & VM_PFNMAP)) { - /* - * This VMA is a physically contiguous region (e.g. - * /dev/mem) that bypasses normal Linux page - * management. Find the overlap between the - * vma and the memslot. 
- */ - - unsigned long start, end; - unsigned long slot_start, slot_end; - - pfnmap = 1; - - start = vma->vm_pgoff; - end = start + - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); - - pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); - - slot_start = pfn - (gfn - slot->base_gfn); - slot_end = slot_start + slot->npages; - - if (start < slot_start) - start = slot_start; - if (end > slot_end) - end = slot_end; - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - - /* - * Now find the largest tsize (up to what the guest - * requested) that will cover gfn, stay within the - * range, and for which gfn and pfn are mutually - * aligned. - */ - - for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { - unsigned long gfn_start, gfn_end, tsize_pages; - tsize_pages = 1 << (tsize - 2); - - gfn_start = gfn & ~(tsize_pages - 1); - gfn_end = gfn_start + tsize_pages; - - if (gfn_start + pfn - gfn < start) - continue; - if (gfn_end + pfn - gfn > end) - continue; - if ((gfn & (tsize_pages - 1)) != - (pfn & (tsize_pages - 1))) - continue; - - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); - pfn &= ~(tsize_pages - 1); - break; - } - } else if (vma && hva >= vma->vm_start && - (vma->vm_flags & VM_HUGETLB)) { - unsigned long psize = vma_kernel_pagesize(vma); - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * Take the largest page size that satisfies both host - * and guest mapping - */ - tsize = min(__ilog2(psize) - 10, tsize); - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - } - - up_read(¤t->mm->mmap_sem); - } - - if (likely(!pfnmap)) { - unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); - pfn = gfn_to_pfn_memslot(slot, gfn); - if (is_error_noslot_pfn(pfn)) { - printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", - (long)gfn); - return -EINVAL; - } - - /* Align guest and physical address to page map boundaries */ - pfn &= ~(tsize_pages - 1); - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); - } - - /* Drop old ref and setup new one. */ - kvmppc_e500_ref_release(ref); - kvmppc_e500_ref_setup(ref, gtlbe, pfn); - - kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, - ref, gvaddr, stlbe); - - /* Clear i-cache for new pages */ - kvmppc_mmu_flush_icache(pfn); - - /* Drop refcount on page, so that mmu notifiers can clear it */ - kvm_release_pfn_clean(pfn); - - return 0; -} - -/* XXX only map the one-one case, for now use TLB0 */ -static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, - int esel, - struct kvm_book3e_206_tlb_entry *stlbe) -{ - struct kvm_book3e_206_tlb_entry *gtlbe; - struct tlbe_ref *ref; - int stlbsel = 0; - int sesel = 0; - int r; - - gtlbe = get_entry(vcpu_e500, 0, esel); - ref = &vcpu_e500->gtlb_priv[0][esel].ref; - - r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), - get_tlb_raddr(gtlbe) >> PAGE_SHIFT, - gtlbe, 0, stlbe, ref); - if (r) - return r; - - write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); - - return 0; -} - -/* Caller must ensure that the specified guest TLB entry is safe to insert into - * the shadow TLB. 
*/ -/* XXX for both one-one and one-to-many , for now use TLB1 */ -static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, - u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, - struct kvm_book3e_206_tlb_entry *stlbe, int esel) -{ - struct tlbe_ref *ref; - unsigned int sesel; - int r; - int stlbsel = 1; - - sesel = vcpu_e500->host_tlb1_nv++; - - if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) - vcpu_e500->host_tlb1_nv = 0; - - ref = &vcpu_e500->tlb_refs[1][sesel]; - r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, - ref); - if (r) - return r; - - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; - if (vcpu_e500->h2g_tlb1_rmap[sesel]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; - vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); - } - vcpu_e500->h2g_tlb1_rmap[sesel] = esel; - - write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); - - return 0; -} - -static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int size = vcpu_e500->gtlb_params[1].entries; - unsigned int offset; - gva_t eaddr; - int i; - - vcpu_e500->tlb1_min_eaddr = ~0UL; - vcpu_e500->tlb1_max_eaddr = 0; - offset = vcpu_e500->gtlb_offset[1]; - - for (i = 0; i < size; i++) { - struct kvm_book3e_206_tlb_entry *tlbe = - &vcpu_e500->gtlb_arch[offset + i]; - - if (!get_tlb_v(tlbe)) - continue; - - eaddr = get_tlb_eaddr(tlbe); - vcpu_e500->tlb1_min_eaddr = - min(vcpu_e500->tlb1_min_eaddr, eaddr); - - eaddr = get_tlb_end(tlbe); - vcpu_e500->tlb1_max_eaddr = - max(vcpu_e500->tlb1_max_eaddr, eaddr); - } -} - -static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, - struct kvm_book3e_206_tlb_entry *gtlbe) -{ - unsigned long start, end, size; - - size = get_tlb_bytes(gtlbe); - start = get_tlb_eaddr(gtlbe) & ~(size - 1); - end = start + size - 1; - - return vcpu_e500->tlb1_min_eaddr == start || - vcpu_e500->tlb1_max_eaddr == end; -} - -/* This function is supposed to be called for a adding a new valid tlb entry */ -static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu, - struct kvm_book3e_206_tlb_entry *gtlbe) -{ - unsigned long start, end, size; - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - - if (!get_tlb_v(gtlbe)) - return; - - size = get_tlb_bytes(gtlbe); - start = get_tlb_eaddr(gtlbe) & ~(size - 1); - end = start + size - 1; - - vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); - vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); -} - -static inline int kvmppc_e500_gtlbe_invalidate( - struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int esel) -{ - struct kvm_book3e_206_tlb_entry *gtlbe = - get_entry(vcpu_e500, tlbsel, esel); - - if (unlikely(get_tlb_iprot(gtlbe))) - return -1; - - if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) - kvmppc_recalc_tlb1map_range(vcpu_e500); - - gtlbe->mas1 = 0; - - return 0; -} - -int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) -{ - int esel; - - if (value & MMUCSR0_TLB0FI) - for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) - kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); - if (value & MMUCSR0_TLB1FI) - for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) - kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); - - /* Invalidate all vcpu id mappings */ - kvmppc_e500_tlbil_all(vcpu_e500); - - return EMULATE_DONE; -} - -int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea) -{ - struct 
kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - unsigned int ia; - int esel, tlbsel; - - ia = (ea >> 2) & 0x1; - - /* since we only have two TLBs, only lower bit is used. */ - tlbsel = (ea >> 3) & 0x1; - - if (ia) { - /* invalidate all entries */ - for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; - esel++) - kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); - } else { - ea &= 0xfffff000; - esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, - get_cur_pid(vcpu), -1); - if (esel >= 0) - kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); - } - - /* Invalidate all vcpu id mappings */ - kvmppc_e500_tlbil_all(vcpu_e500); - - return EMULATE_DONE; -} - -static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, - int pid, int type) -{ - struct kvm_book3e_206_tlb_entry *tlbe; - int tid, esel; - - /* invalidate all entries */ - for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { - tlbe = get_entry(vcpu_e500, tlbsel, esel); - tid = get_tlb_tid(tlbe); - if (type == 0 || tid == pid) { - inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); - kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); - } - } -} - -static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, - gva_t ea) -{ - int tlbsel, esel; - - for (tlbsel = 0; tlbsel < 2; tlbsel++) { - esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); - if (esel >= 0) { - inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); - kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); - break; - } - } -} - -int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - int pid = get_cur_spid(vcpu); - - if (type == 0 || type == 1) { - tlbilx_all(vcpu_e500, 0, pid, type); - tlbilx_all(vcpu_e500, 1, pid, type); - } else if (type == 3) { - tlbilx_one(vcpu_e500, pid, ea); - } - - return EMULATE_DONE; -} - -int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - int tlbsel, esel; - struct kvm_book3e_206_tlb_entry *gtlbe; - - tlbsel = get_tlb_tlbsel(vcpu); - esel = get_tlb_esel(vcpu, tlbsel); - - gtlbe = get_entry(vcpu_e500, tlbsel, esel); - vcpu->arch.shared->mas0 &= ~MAS0_NV(~0); - vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu->arch.shared->mas1 = gtlbe->mas1; - vcpu->arch.shared->mas2 = gtlbe->mas2; - vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; - - return EMULATE_DONE; -} - -int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - int as = !!get_cur_sas(vcpu); - unsigned int pid = get_cur_spid(vcpu); - int esel, tlbsel; - struct kvm_book3e_206_tlb_entry *gtlbe = NULL; - - for (tlbsel = 0; tlbsel < 2; tlbsel++) { - esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); - if (esel >= 0) { - gtlbe = get_entry(vcpu_e500, tlbsel, esel); - break; - } - } - - if (gtlbe) { - esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1; - - vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) - | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu->arch.shared->mas1 = gtlbe->mas1; - vcpu->arch.shared->mas2 = gtlbe->mas2; - vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; - } else { - int victim; - - /* since we only have two TLBs, only lower bit is used. */ - tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1; - victim = (tlbsel == 0) ? 
gtlb0_get_next_victim(vcpu_e500) : 0; - - vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) - | MAS0_ESEL(victim) - | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu->arch.shared->mas1 = - (vcpu->arch.shared->mas6 & MAS6_SPID0) - | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0)) - | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0)); - vcpu->arch.shared->mas2 &= MAS2_EPN; - vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 & - MAS2_ATTRIB_MASK; - vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | - MAS3_U2 | MAS3_U3; - } - - kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); - return EMULATE_DONE; -} - -int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_entry *gtlbe; - int tlbsel, esel; - int recal = 0; - - tlbsel = get_tlb_tlbsel(vcpu); - esel = get_tlb_esel(vcpu, tlbsel); - - gtlbe = get_entry(vcpu_e500, tlbsel, esel); - - if (get_tlb_v(gtlbe)) { - inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); - if ((tlbsel == 1) && - kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) - recal = 1; - } - - gtlbe->mas1 = vcpu->arch.shared->mas1; - gtlbe->mas2 = vcpu->arch.shared->mas2; - if (!(vcpu->arch.shared->msr & MSR_CM)) - gtlbe->mas2 &= 0xffffffffUL; - gtlbe->mas7_3 = vcpu->arch.shared->mas7_3; - - trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, - gtlbe->mas2, gtlbe->mas7_3); - - if (tlbsel == 1) { - /* - * If a valid tlb1 entry is overwritten then recalculate the - * min/max TLB1 map address range otherwise no need to look - * in tlb1 array. - */ - if (recal) - kvmppc_recalc_tlb1map_range(vcpu_e500); - else - kvmppc_set_tlb1map_range(vcpu, gtlbe); - } - - /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ - if (tlbe_is_host_safe(vcpu, gtlbe)) { - u64 eaddr = get_tlb_eaddr(gtlbe); - u64 raddr = get_tlb_raddr(gtlbe); - - if (tlbsel == 0) { - gtlbe->mas1 &= ~MAS1_TSIZE(~0); - gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); - } - - /* Premap the faulting page */ - kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); - } - - kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); - return EMULATE_DONE; -} - -static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, - gva_t eaddr, unsigned int pid, int as) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - int esel, tlbsel; - - for (tlbsel = 0; tlbsel < 2; tlbsel++) { - esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); - if (esel >= 0) - return index_of(tlbsel, esel); - } - - return -1; -} - -/* 'linear_address' is actually an encoding of AS|PID|EADDR . */ -int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, - struct kvm_translation *tr) -{ - int index; - gva_t eaddr; - u8 pid; - u8 as; - - eaddr = tr->linear_address; - pid = (tr->linear_address >> 32) & 0xff; - as = (tr->linear_address >> 40) & 0x1; - - index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); - if (index < 0) { - tr->valid = 0; - return 0; - } - - tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); - /* XXX what does "writeable" and "usermode" even mean? 
*/ - tr->valid = 1; - - return 0; -} - - -int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) -{ - unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); - - return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); -} - -int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) -{ - unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); - - return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); -} - -void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) -{ - unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); - - kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); -} - -void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) -{ - unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); - - kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); -} - -gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, - gva_t eaddr) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_entry *gtlbe; - u64 pgmask; - - gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index)); - pgmask = get_tlb_bytes(gtlbe) - 1; - - return get_tlb_raddr(gtlbe) | (eaddr & pgmask); -} - -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) -{ -} - -void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, - unsigned int index) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct tlbe_priv *priv; - struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; - int tlbsel = tlbsel_of(index); - int esel = esel_of(index); - - gtlbe = get_entry(vcpu_e500, tlbsel, esel); - - switch (tlbsel) { - case 0: - priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - - /* Triggers after clear_tlb_refs or on initial mapping */ - if (!(priv->ref.flags & E500_TLB_VALID)) { - kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); - } else { - kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, - &priv->ref, eaddr, &stlbe); - write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); - } - break; - - case 1: { - gfn_t gfn = gpaddr >> PAGE_SHIFT; - kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, - esel); - break; - } - - default: - BUG(); - break; - } -} - -/************* MMU Notifiers *************/ - -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) -{ - trace_kvm_unmap_hva(hva); - - /* - * Flush all shadow tlb entries everywhere. 
This is slow, but - * we are 100% sure that we catch the to be unmapped page - */ - kvm_flush_remote_tlbs(kvm); - - return 0; -} - -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) -{ - /* kvm_unmap_hva flushes everything anyways */ - kvm_unmap_hva(kvm, start); - - return 0; -} - -int kvm_age_hva(struct kvm *kvm, unsigned long hva) -{ - /* XXX could be more clever ;) */ - return 0; -} - -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - /* XXX could be more clever ;) */ - return 0; -} - -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - /* The page will get remapped properly on its next fault */ - kvm_unmap_hva(kvm, hva); -} - -/*****************************************/ - -static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int i; - - clear_tlb1_bitmap(vcpu_e500); - kfree(vcpu_e500->g2h_tlb1_map); - - clear_tlb_refs(vcpu_e500); - kfree(vcpu_e500->gtlb_priv[0]); - kfree(vcpu_e500->gtlb_priv[1]); - - if (vcpu_e500->shared_tlb_pages) { - vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch, - PAGE_SIZE))); - - for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) { - set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]); - put_page(vcpu_e500->shared_tlb_pages[i]); - } - - vcpu_e500->num_shared_tlb_pages = 0; - - kfree(vcpu_e500->shared_tlb_pages); - vcpu_e500->shared_tlb_pages = NULL; - } else { - kfree(vcpu_e500->gtlb_arch); - } - - vcpu_e500->gtlb_arch = NULL; -} - -void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) -{ - sregs->u.e.mas0 = vcpu->arch.shared->mas0; - sregs->u.e.mas1 = vcpu->arch.shared->mas1; - sregs->u.e.mas2 = vcpu->arch.shared->mas2; - sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; - sregs->u.e.mas4 = vcpu->arch.shared->mas4; - sregs->u.e.mas6 = vcpu->arch.shared->mas6; - - sregs->u.e.mmucfg = vcpu->arch.mmucfg; - sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; - sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; - sregs->u.e.tlbcfg[2] = 0; - sregs->u.e.tlbcfg[3] = 0; -} - -int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) -{ - if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { - vcpu->arch.shared->mas0 = sregs->u.e.mas0; - vcpu->arch.shared->mas1 = sregs->u.e.mas1; - vcpu->arch.shared->mas2 = sregs->u.e.mas2; - vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; - vcpu->arch.shared->mas4 = sregs->u.e.mas4; - vcpu->arch.shared->mas6 = sregs->u.e.mas6; - } - - return 0; -} - -int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, - struct kvm_config_tlb *cfg) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct kvm_book3e_206_tlb_params params; - char *virt; - struct page **pages; - struct tlbe_priv *privs[2] = {}; - u64 *g2h_bitmap = NULL; - size_t array_len; - u32 sets; - int num_pages, ret, i; - - if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV) - return -EINVAL; - - if (copy_from_user(¶ms, (void __user *)(uintptr_t)cfg->params, - sizeof(params))) - return -EFAULT; - - if (params.tlb_sizes[1] > 64) - return -EINVAL; - if (params.tlb_ways[1] != params.tlb_sizes[1]) - return -EINVAL; - if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0) - return -EINVAL; - if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0) - return -EINVAL; - - if (!is_power_of_2(params.tlb_ways[0])) - return -EINVAL; - - sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]); - if (!is_power_of_2(sets)) - return -EINVAL; - - array_len = params.tlb_sizes[0] + params.tlb_sizes[1]; - array_len *= sizeof(struct kvm_book3e_206_tlb_entry); - - if (cfg->array_len < 
array_len) - return -EINVAL; - - num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) - - cfg->array / PAGE_SIZE; - pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); - if (!pages) - return -ENOMEM; - - ret = get_user_pages_fast(cfg->array, num_pages, 1, pages); - if (ret < 0) - goto err_pages; - - if (ret != num_pages) { - num_pages = ret; - ret = -EFAULT; - goto err_put_page; - } - - virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); - if (!virt) { - ret = -ENOMEM; - goto err_put_page; - } - - privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0], - GFP_KERNEL); - privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1], - GFP_KERNEL); - - if (!privs[0] || !privs[1]) { - ret = -ENOMEM; - goto err_privs; - } - - g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1], - GFP_KERNEL); - if (!g2h_bitmap) { - ret = -ENOMEM; - goto err_privs; - } - - free_gtlb(vcpu_e500); - - vcpu_e500->gtlb_priv[0] = privs[0]; - vcpu_e500->gtlb_priv[1] = privs[1]; - vcpu_e500->g2h_tlb1_map = g2h_bitmap; - - vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) - (virt + (cfg->array & (PAGE_SIZE - 1))); - - vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; - vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; - - vcpu_e500->gtlb_offset[0] = 0; - vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; - - vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; - - vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); - if (params.tlb_sizes[0] <= 2048) - vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0]; - vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; - - vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); - vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1]; - vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; - - vcpu_e500->shared_tlb_pages = pages; - vcpu_e500->num_shared_tlb_pages = num_pages; - - vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0]; - vcpu_e500->gtlb_params[0].sets = sets; - - vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; - vcpu_e500->gtlb_params[1].sets = 1; - - kvmppc_recalc_tlb1map_range(vcpu_e500); - return 0; - -err_privs: - kfree(privs[0]); - kfree(privs[1]); - -err_put_page: - for (i = 0; i < num_pages; i++) - put_page(pages[i]); - -err_pages: - kfree(pages); - return ret; -} - -int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, - struct kvm_dirty_tlb *dirty) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - kvmppc_recalc_tlb1map_range(vcpu_e500); - clear_tlb_refs(vcpu_e500); - return 0; -} - -int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; - int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); - int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; - - host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; - host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - - /* - * This should never happen on real e500 hardware, but is - * architecturally possible -- e.g. in some weird nested - * virtualization case. 
- */ - if (host_tlb_params[0].entries == 0 || - host_tlb_params[1].entries == 0) { - pr_err("%s: need to know host tlb size\n", __func__); - return -ENODEV; - } - - host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> - TLBnCFG_ASSOC_SHIFT; - host_tlb_params[1].ways = host_tlb_params[1].entries; - - if (!is_power_of_2(host_tlb_params[0].entries) || - !is_power_of_2(host_tlb_params[0].ways) || - host_tlb_params[0].entries < host_tlb_params[0].ways || - host_tlb_params[0].ways == 0) { - pr_err("%s: bad tlb0 host config: %u entries %u ways\n", - __func__, host_tlb_params[0].entries, - host_tlb_params[0].ways); - return -ENODEV; - } - - host_tlb_params[0].sets = - host_tlb_params[0].entries / host_tlb_params[0].ways; - host_tlb_params[1].sets = 1; - - vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; - vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; - - vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM; - vcpu_e500->gtlb_params[0].sets = - KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM; - - vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE; - vcpu_e500->gtlb_params[1].sets = 1; - - vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL); - if (!vcpu_e500->gtlb_arch) - return -ENOMEM; - - vcpu_e500->gtlb_offset[0] = 0; - vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; - - vcpu_e500->tlb_refs[0] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[0]) - goto err; - - vcpu_e500->tlb_refs[1] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[1]) - goto err; - - vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) * - vcpu_e500->gtlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->gtlb_priv[0]) - goto err; - - vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) * - vcpu_e500->gtlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->gtlb_priv[1]) - goto err; - - vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) * - vcpu_e500->gtlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->g2h_tlb1_map) - goto err; - - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * - host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->h2g_tlb1_rmap) - goto err; - - /* Init TLB configuration register */ - vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & - ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); - vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries; - vcpu->arch.tlbcfg[0] |= - vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; - - vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & - ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); - vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries; - vcpu->arch.tlbcfg[1] |= - vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; - - kvmppc_recalc_tlb1map_range(vcpu_e500); - return 0; - -err: - free_gtlb(vcpu_e500); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); - return -1; -} - -void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - free_gtlb(vcpu_e500); - kfree(vcpu_e500->h2g_tlb1_rmap); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); -} -- cgit v0.10.2 From c015c62b13498629809185eb0ff04e3f13d1afb6 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 17 Jan 2013 17:54:36 +0100 Subject: KVM: PPC: e500: Implement TLB1-in-TLB0 mapping When a host mapping fault happens in a guest TLB1 entry today, we map the translated guest entry into the host's TLB1. 
This isn't particularly clever when the guest is mapped by normal 4k pages, since these would be a lot better to put into TLB0 instead. This patch adds the required logic to map 4k TLB1 shadow maps into the host's TLB0. Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index c70d37e..41cefd4 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -28,6 +28,7 @@ #define E500_TLB_VALID 1 #define E500_TLB_BITMAP 2 +#define E500_TLB_TLB0 (1 << 2) struct tlbe_ref { pfn_t pfn; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 4c32d65..9a150bc 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -216,10 +216,21 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, vcpu_e500->g2h_tlb1_map[esel] = 0; ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); local_irq_restore(flags); + } - return; + if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { + /* + * TLB1 entry is backed by 4k pages. This should happen + * rarely and is not worth optimizing. Invalidate everything. + */ + kvmppc_e500_tlbil_all(vcpu_e500); + ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); } + /* Already invalidated in between */ + if (!(ref->flags & E500_TLB_VALID)) + return; + /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); @@ -487,38 +498,54 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, return 0; } +static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, + struct tlbe_ref *ref, + int esel) +{ + unsigned int sesel = vcpu_e500->host_tlb1_nv++; + + if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) + vcpu_e500->host_tlb1_nv = 0; + + vcpu_e500->tlb_refs[1][sesel] = *ref; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + if (vcpu_e500->h2g_tlb1_rmap[sesel]) { + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); + } + vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + + return sesel; +} + /* Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. 
*/ -/* XXX for both one-one and one-to-many , for now use TLB1 */ +/* For both one-one and one-to-many */ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { - struct tlbe_ref *ref; - unsigned int sesel; + struct tlbe_ref ref; + int sesel; int r; - int stlbsel = 1; - - sesel = vcpu_e500->host_tlb1_nv++; - - if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) - vcpu_e500->host_tlb1_nv = 0; - ref = &vcpu_e500->tlb_refs[1][sesel]; + ref.flags = 0; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, - ref); + &ref); if (r) return r; - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; - if (vcpu_e500->h2g_tlb1_rmap[sesel]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; - vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); + /* Use TLB0 when we can only map a page with 4k */ + if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; + write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); + return 0; } - vcpu_e500->h2g_tlb1_rmap[sesel] = esel; - write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); + /* Otherwise map into TLB1 */ + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); + write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; } -- cgit v0.10.2 From 483ba97c0fafaade3a0ee61217ca5123b0d85c03 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 18 Jan 2013 15:13:19 +0100 Subject: KVM: PPC: E500: Make clear_tlb_refs and clear_tlb1_bitmap static Host shadow TLB flushing is logic that the guest TLB code should have no insight about. Declare the internal clear_tlb_refs and clear_tlb1_bitmap functions static to the host TLB handling file. Instead of these, we can use the already exported kvmppc_core_flush_tlb(). This gives us a common API across the board to say "please flush any pending host shadow translation". 
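As a rough illustration of the layering this change aims for (a sketch only, assuming the e500 KVM declarations from arch/powerpc/kvm/e500.h and e500_mmu_host.h; it is not a literal excerpt from the patch below):

/* e500_mmu_host.c -- host shadow TLB details stay file-local */
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500);
static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500);

/* the one entry point visible to the rest of KVM:
 * "please flush any pending host shadow translation" */
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
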
Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index c3d1721..623a192 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -541,10 +541,8 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) { int i; - clear_tlb1_bitmap(vcpu_e500); + kvmppc_core_flush_tlb(&vcpu_e500->vcpu); kfree(vcpu_e500->g2h_tlb1_map); - - clear_tlb_refs(vcpu_e500); kfree(vcpu_e500->gtlb_priv[0]); kfree(vcpu_e500->gtlb_priv[1]); @@ -735,7 +733,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_recalc_tlb1map_range(vcpu_e500); - clear_tlb_refs(vcpu_e500); + kvmppc_core_flush_tlb(vcpu); return 0; } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 9a150bc..a222edf 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -262,7 +262,7 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) } } -void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) +static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) memset(vcpu_e500->g2h_tlb1_map, 0, @@ -284,7 +284,7 @@ static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) } } -void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) +static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) { int stlbsel = 1; int i; diff --git a/arch/powerpc/kvm/e500_mmu_host.h b/arch/powerpc/kvm/e500_mmu_host.h index 9e4d4a2..7624835 100644 --- a/arch/powerpc/kvm/e500_mmu_host.h +++ b/arch/powerpc/kvm/e500_mmu_host.h @@ -12,8 +12,6 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel); -void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500); -void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500); int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500); void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); -- cgit v0.10.2 From b9e3e208935e95ad62bd1b1bc4408c23a9ae3ada Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 18 Jan 2013 15:22:08 +0100 Subject: KVM: PPC: E500: Remove kvmppc_e500_tlbil_all usage from guest TLB code The guest TLB handling code should not have any insight into how the host TLB shadow code works. kvmppc_e500_tlbil_all() is a function that is used for distinction between e500v2 and e500mc (E.HV) on how to flush shadow entries. This function really is private between the e500.c/e500mc.c file and e500_mmu_host.c. Instead of this one, use the public kvmppc_core_flush_tlb() function to flush all shadow TLB entries. As a nice side effect, with this we also end up flushing TLB1 entries which we forgot to do before. 
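For context, a minimal sketch of what a guest TLB-invalidate emulation path looks like after this change (illustrative only; it assumes struct kvmppc_vcpu_e500, kvmppc_core_flush_tlb() and EMULATE_DONE from the e500/KVM headers and omits the guest-visible bookkeeping):

/* Guest MMU emulation no longer reaches into host shadow internals;
 * it simply requests a full flush through the exported API. */
static int example_emul_invalidate_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	/* ... invalidate the guest-visible gtlb entries here ... */

	/* Drop every host shadow mapping (TLB0 and TLB1 alike). */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}
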
Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index 623a192..5c44759 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -239,8 +239,8 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); - /* Invalidate all vcpu id mappings */ - kvmppc_e500_tlbil_all(vcpu_e500); + /* Invalidate all host shadow mappings */ + kvmppc_core_flush_tlb(&vcpu_e500->vcpu); return EMULATE_DONE; } @@ -269,8 +269,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } - /* Invalidate all vcpu id mappings */ - kvmppc_e500_tlbil_all(vcpu_e500); + /* Invalidate all host shadow mappings */ + kvmppc_core_flush_tlb(&vcpu_e500->vcpu); return EMULATE_DONE; } -- cgit v0.10.2 From 3f0c3d0bb2bcc4b88b22452a7cf0073ee9a0f1e6 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 26 Jan 2013 23:56:04 +0200 Subject: KVM: x86 emulator: fix test_cc() build failure on i386 'pushq' doesn't exist on i386. Replace with 'push', which should work since the operand is a register. Signed-off-by: Avi Kivity Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e99fb72..2b11318 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1013,7 +1013,7 @@ static u8 test_cc(unsigned int condition, unsigned long flags) void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; - asm("pushq %[flags]; popf; call *%[fastop]" + asm("push %[flags]; popf; call *%[fastop]" : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } -- cgit v0.10.2 From 261874b0d5ebe2a5ccc544df7170d6559635e79a Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 24 Jan 2013 15:04:03 -0700 Subject: kvm: Force IOMMU remapping on memory slot read-only flag changes Memory slot flags can be altered without changing other parameters of the slot. The read-only attribute is the only one the IOMMU cares about, so generate an un-map, re-map when this occurs. This also avoid unnecessarily re-mapping the slot when no IOMMU visible changes are made. Reviewed-by: Xiao Guangrong Signed-off-by: Alex Williamson Signed-off-by: Gleb Natapov diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5e709eb..3fec2cd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -731,6 +731,7 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_memory_slot *slot; struct kvm_memory_slot old, new; struct kvm_memslots *slots = NULL, *old_memslots; + bool old_iommu_mapped; r = check_memory_region_flags(mem); if (r) @@ -772,6 +773,8 @@ int __kvm_set_memory_region(struct kvm *kvm, new.npages = npages; new.flags = mem->flags; + old_iommu_mapped = old.npages; + /* * Disallow changing a memory slot's size or changing anything about * zero sized slots that doesn't involve making them non-zero. @@ -835,6 +838,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* slot was deleted or moved, clear iommu mapping */ kvm_iommu_unmap_pages(kvm, &old); + old_iommu_mapped = false; /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. * @@ -863,11 +867,27 @@ int __kvm_set_memory_region(struct kvm *kvm, goto out_free; } - /* map new memory slot into the iommu */ + /* + * IOMMU mapping: New slots need to be mapped. 
Old slots need to be + * un-mapped and re-mapped if their base changes or if flags that the + * iommu cares about change (read-only). Base change unmapping is + * handled above with slot deletion, so we only unmap incompatible + * flags here. Anything else the iommu might care about for existing + * slots (size changes, userspace addr changes) is disallowed above, + * so any other attribute changes getting here can be skipped. + */ if (npages) { - r = kvm_iommu_map_pages(kvm, &new); - if (r) - goto out_slots; + if (old_iommu_mapped && + ((new.flags ^ old.flags) & KVM_MEM_READONLY)) { + kvm_iommu_unmap_pages(kvm, &old); + old_iommu_mapped = false; + } + + if (!old_iommu_mapped) { + r = kvm_iommu_map_pages(kvm, &new); + if (r) + goto out_slots; + } } /* actual memory is freed via old in kvm_free_physmem_slot below */ -- cgit v0.10.2 From d47510e295c0f82699192a61d715351cf00f65de Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 24 Jan 2013 15:04:09 -0700 Subject: kvm: Obey read-only mappings in iommu We've been ignoring read-only mappings and programming everything into the iommu as read-write. Fix this to only include the write access flag when read-only is not set. Signed-off-by: Alex Williamson Signed-off-by: Gleb Natapov diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 4a340cb..72a130b 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -76,7 +76,9 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) gfn = slot->base_gfn; end_gfn = gfn + slot->npages; - flags = IOMMU_READ | IOMMU_WRITE; + flags = IOMMU_READ; + if (!(slot->flags & KVM_MEM_READONLY)) + flags |= IOMMU_WRITE; if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY) flags |= IOMMU_CACHE; -- cgit v0.10.2 From 83d4c286931c9d28c5be21bac3c73a2332cab681 Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Fri, 25 Jan 2013 10:18:49 +0800 Subject: x86, apicv: add APICv register virtualization support - APIC read doesn't cause VM-Exit - APIC write becomes trap-like Reviewed-by: Marcelo Tosatti Signed-off-by: Kevin Tian Signed-off-by: Yang Zhang Signed-off-by: Gleb Natapov diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index e385df9..44c3f7e 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -66,6 +66,7 @@ #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 +#define EXIT_REASON_APIC_WRITE 56 #define EXIT_REASON_INVPCID 58 #define VMX_EXIT_REASONS \ @@ -141,6 +142,7 @@ #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 +#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 9392f52..0664c13 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1212,6 +1212,21 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); +/* emulate APIC access in a trap manner */ +void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) +{ + u32 val = 0; + + /* hw has done the conditional check and inst decode */ + offset &= 0xff0; + + apic_reg_read(vcpu->arch.apic, offset, 4, &val); + + /* TODO: optimize to just emulate side effect w/o one more write */ + apic_reg_write(vcpu->arch.apic, offset, val); +} +EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); + void kvm_free_lapic(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = 
vcpu->arch.apic; diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index e5ebf9f..9a8ee22 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -64,6 +64,8 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); +void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); + void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 02eeba8..5ad7c85 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -84,6 +84,9 @@ module_param(vmm_exclusive, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); +static bool __read_mostly enable_apicv_reg = 1; +module_param(enable_apicv_reg, bool, S_IRUGO); + /* * If nested=1, nested virtualization is supported, i.e., guests may use * VMX and be a hypervisor for its own guests. If nested=0, guests may not @@ -764,6 +767,12 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void) SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } +static inline bool cpu_has_vmx_apic_register_virt(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_APIC_REGISTER_VIRT; +} + static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && @@ -2540,7 +2549,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | - SECONDARY_EXEC_ENABLE_INVPCID; + SECONDARY_EXEC_ENABLE_INVPCID | + SECONDARY_EXEC_APIC_REGISTER_VIRT; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) @@ -2551,6 +2561,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif + + if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) + _cpu_based_2nd_exec_control &= ~( + SECONDARY_EXEC_APIC_REGISTER_VIRT); + if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ @@ -2748,6 +2763,9 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_ple()) ple_gap = 0; + if (!cpu_has_vmx_apic_register_virt()) + enable_apicv_reg = 0; + if (nested) nested_vmx_setup_ctls_msrs(); @@ -3829,6 +3847,8 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; + if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm)) + exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT; return exec_control; } @@ -4787,6 +4807,16 @@ static int handle_apic_access(struct kvm_vcpu *vcpu) return emulate_instruction(vcpu, 0) == EMULATE_DONE; } +static int handle_apic_write(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 offset = exit_qualification & 0xfff; + + /* APIC-write VM exit is trap-like and thus no need to adjust IP */ + kvm_apic_write_nodecode(vcpu, offset); + return 1; +} + static int handle_task_switch(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -5721,6 +5751,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_VMON] = handle_vmon, 
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, + [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, -- cgit v0.10.2 From 8d14695f9542e9e0195d6e41ddaa52c32322adf5 Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Fri, 25 Jan 2013 10:18:50 +0800 Subject: x86, apicv: add virtual x2apic support basically to benefit from apicv, we need to enable virtualized x2apic mode. Currently, we only enable it when guest is really using x2apic. Also, clear MSR bitmap for corresponding x2apic MSRs when guest enabled x2apic: 0x800 - 0x8ff: no read intercept for apicv register virtualization, except APIC ID and TMCCT which need software's assistance to get right value. Reviewed-by: Marcelo Tosatti Signed-off-by: Kevin Tian Signed-off-by: Yang Zhang Signed-off-by: Gleb Natapov diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 77d56a4..d42c283 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -699,6 +699,7 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); + void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*get_tdp_level)(void); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 44c3f7e..0a54df0 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -139,6 +139,7 @@ #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 #define SECONDARY_EXEC_RDTSCP 0x00000008 +#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0664c13..f69fc50 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -140,11 +140,6 @@ static inline int apic_enabled(struct kvm_lapic *apic) (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \ APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER) -static inline int apic_x2apic_mode(struct kvm_lapic *apic) -{ - return apic->vcpu->arch.apic_base & X2APIC_ENABLE; -} - static inline int kvm_apic_id(struct kvm_lapic *apic) { return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; @@ -1303,6 +1298,7 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) { + u64 old_value = vcpu->arch.apic_base; struct kvm_lapic *apic = vcpu->arch.apic; if (!apic) { @@ -1324,11 +1320,16 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) value &= ~MSR_IA32_APICBASE_BSP; vcpu->arch.apic_base = value; - if (apic_x2apic_mode(apic)) { - u32 id = kvm_apic_id(apic); - u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); - kvm_apic_set_ldr(apic, ldr); + if ((old_value ^ value) & X2APIC_ENABLE) { + if (value & X2APIC_ENABLE) { + u32 id = kvm_apic_id(apic); + u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); + kvm_apic_set_ldr(apic, ldr); + kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); + } else + kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false); } + apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; diff --git 
a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 9a8ee22..22a5397 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -126,4 +126,9 @@ static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu) return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic); } +static inline int apic_x2apic_mode(struct kvm_lapic *apic) +{ + return apic->vcpu->arch.apic_base & X2APIC_ENABLE; +} + #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d29d3cd..38407e9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3571,6 +3571,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) set_cr_intercept(svm, INTERCEPT_CR8_WRITE); } +static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) +{ + return; +} + static int svm_nmi_allowed(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4290,6 +4295,7 @@ static struct kvm_x86_ops svm_x86_ops = { .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, + .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, .set_tss_addr = svm_set_tss_addr, .get_tdp_level = get_npt_level, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5ad7c85..3ce8a16 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -643,6 +643,8 @@ static unsigned long *vmx_io_bitmap_a; static unsigned long *vmx_io_bitmap_b; static unsigned long *vmx_msr_bitmap_legacy; static unsigned long *vmx_msr_bitmap_longmode; +static unsigned long *vmx_msr_bitmap_legacy_x2apic; +static unsigned long *vmx_msr_bitmap_longmode_x2apic; static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; @@ -767,6 +769,12 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void) SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } +static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; +} + static inline bool cpu_has_vmx_apic_register_virt(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -1830,6 +1838,25 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) vmx->guest_msrs[from] = tmp; } +static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) +{ + unsigned long *msr_bitmap; + + if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { + if (is_long_mode(vcpu)) + msr_bitmap = vmx_msr_bitmap_longmode_x2apic; + else + msr_bitmap = vmx_msr_bitmap_legacy_x2apic; + } else { + if (is_long_mode(vcpu)) + msr_bitmap = vmx_msr_bitmap_longmode; + else + msr_bitmap = vmx_msr_bitmap_legacy; + } + + vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); +} + /* * Set up the vmcs to automatically save and restore system * msrs. 
Don't touch the 64-bit msrs if the guest is in legacy @@ -1838,7 +1865,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs, index; - unsigned long *msr_bitmap; save_nmsrs = 0; #ifdef CONFIG_X86_64 @@ -1870,14 +1896,8 @@ static void setup_msrs(struct vcpu_vmx *vmx) vmx->save_nmsrs = save_nmsrs; - if (cpu_has_vmx_msr_bitmap()) { - if (is_long_mode(&vmx->vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode; - else - msr_bitmap = vmx_msr_bitmap_legacy; - - vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); - } + if (cpu_has_vmx_msr_bitmap()) + vmx_set_msr_bitmap(&vmx->vcpu); } /* @@ -2543,6 +2563,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { min2 = 0; opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | @@ -2564,7 +2585,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( - SECONDARY_EXEC_APIC_REGISTER_VIRT); + SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT @@ -3725,7 +3747,10 @@ static void free_vpid(struct vcpu_vmx *vmx) spin_unlock(&vmx_vpid_lock); } -static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr) +#define MSR_TYPE_R 1 +#define MSR_TYPE_W 2 +static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) { int f = sizeof(unsigned long); @@ -3738,20 +3763,93 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr) * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { - __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */ - __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */ + if (type & MSR_TYPE_R) + /* read-low */ + __clear_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __clear_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; - __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */ - __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */ + if (type & MSR_TYPE_R) + /* read-high */ + __clear_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __clear_bit(msr, msr_bitmap + 0xc00 / f); + + } +} + +static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return; + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
+ */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R) + /* read-low */ + __set_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __set_bit(msr, msr_bitmap + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + /* read-high */ + __set_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __set_bit(msr, msr_bitmap + 0xc00 / f); + } } static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) { if (!longmode_only) - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr); + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, + msr, MSR_TYPE_R | MSR_TYPE_W); + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, + msr, MSR_TYPE_R | MSR_TYPE_W); +} + +static void vmx_enable_intercept_msr_read_x2apic(u32 msr) +{ + __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, + msr, MSR_TYPE_R); + __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, + msr, MSR_TYPE_R); +} + +static void vmx_disable_intercept_msr_read_x2apic(u32 msr) +{ + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, + msr, MSR_TYPE_R); + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, + msr, MSR_TYPE_R); +} + +static void vmx_disable_intercept_msr_write_x2apic(u32 msr) +{ + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, + msr, MSR_TYPE_W); + __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, + msr, MSR_TYPE_W); } /* @@ -3849,6 +3947,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm)) exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT; + exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; return exec_control; } @@ -6101,6 +6200,34 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) vmcs_write32(TPR_THRESHOLD, irr); } +static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) +{ + u32 sec_exec_control; + + /* + * There is not point to enable virtualize x2apic without enable + * apicv + */ + if (!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg) + return; + + if (!vm_need_tpr_shadow(vcpu->kvm)) + return; + + sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + + if (set) { + sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + } else { + sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + } + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); + + vmx_set_msr_bitmap(vcpu); +} + static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info; @@ -7364,6 +7491,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, + .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, @@ -7396,7 +7524,7 @@ static struct kvm_x86_ops vmx_x86_ops = { static int __init vmx_init(void) { - int r, i; + int r, i, msr; rdmsrl_safe(MSR_EFER, &host_efer); @@ -7417,11 +7545,19 @@ static int __init vmx_init(void) if (!vmx_msr_bitmap_legacy) goto out1; + vmx_msr_bitmap_legacy_x2apic = + (unsigned long *)__get_free_page(GFP_KERNEL); + if 
(!vmx_msr_bitmap_legacy_x2apic) + goto out2; vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_msr_bitmap_longmode) - goto out2; + goto out3; + vmx_msr_bitmap_longmode_x2apic = + (unsigned long *)__get_free_page(GFP_KERNEL); + if (!vmx_msr_bitmap_longmode_x2apic) + goto out4; /* * Allow direct access to the PC debug port (it is often used for I/O @@ -7453,6 +7589,24 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); + memcpy(vmx_msr_bitmap_legacy_x2apic, + vmx_msr_bitmap_legacy, PAGE_SIZE); + memcpy(vmx_msr_bitmap_longmode_x2apic, + vmx_msr_bitmap_longmode, PAGE_SIZE); + + if (enable_apicv_reg) { + for (msr = 0x800; msr <= 0x8ff; msr++) + vmx_disable_intercept_msr_read_x2apic(msr); + + /* According SDM, in x2apic mode, the whole id reg is used. + * But in KVM, it only use the highest eight bits. Need to + * intercept it */ + vmx_enable_intercept_msr_read_x2apic(0x802); + /* TMCCT */ + vmx_enable_intercept_msr_read_x2apic(0x839); + /* TPR */ + vmx_disable_intercept_msr_write_x2apic(0x808); + } if (enable_ept) { kvm_mmu_set_mask_ptes(0ull, @@ -7466,8 +7620,10 @@ static int __init vmx_init(void) return 0; -out3: +out4: free_page((unsigned long)vmx_msr_bitmap_longmode); +out3: + free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); out2: free_page((unsigned long)vmx_msr_bitmap_legacy); out1: @@ -7479,6 +7635,8 @@ out: static void __exit vmx_exit(void) { + free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic); + free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); free_page((unsigned long)vmx_msr_bitmap_legacy); free_page((unsigned long)vmx_msr_bitmap_longmode); free_page((unsigned long)vmx_io_bitmap_b); -- cgit v0.10.2 From c7c9c56ca26f7b9458711b2d78b60b60e0d38ba7 Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Fri, 25 Jan 2013 10:18:51 +0800 Subject: x86, apicv: add virtual interrupt delivery support Virtual interrupt delivery avoids KVM to inject vAPIC interrupts manually, which is fully taken care of by the hardware. This needs some special awareness into existing interrupr injection path: - for pending interrupt, instead of direct injection, we may need update architecture specific indicators before resuming to guest. - A pending interrupt, which is masked by ISR, should be also considered in above update action, since hardware will decide when to inject it at right time. Current has_interrupt and get_interrupt only returns a valid vector from injection p.o.v. 
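A rough sketch of the resulting decision in the interrupt-delivery path (illustrative only; the hook names follow the kvm_x86_ops additions in the diff below, the surrounding logic is simplified, and the example_ helper is hypothetical):

/* With virtual interrupt delivery, a pending vector is handed to the
 * hardware via the IRR indicator instead of being injected by software;
 * the CPU then delivers it at the right time, honouring ISR masking. */
static void example_update_pending_irq(struct kvm_vcpu *vcpu, int max_irr)
{
	if (!kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
		/* No apicv: fall back to the existing software
		 * event-injection path (not shown here). */
		return;
	}

	kvm_x86_ops->hwapic_irr_update(vcpu, max_irr);
}
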
Reviewed-by: Marcelo Tosatti Signed-off-by: Kevin Tian Signed-off-by: Yang Zhang Signed-off-by: Gleb Natapov diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h index c5f92a9..c3e2935 100644 --- a/arch/ia64/kvm/lapic.h +++ b/arch/ia64/kvm/lapic.h @@ -27,4 +27,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq); #define kvm_apic_present(x) (true) #define kvm_lapic_enabled(x) (true) +static inline bool kvm_apic_vid_enabled(void) +{ + /* IA64 has no apicv supporting, do nothing here */ + return false; +} + #endif diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d42c283..635a74d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -699,6 +699,10 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); + int (*vm_has_apicv)(struct kvm *kvm); + void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); + void (*hwapic_isr_update)(struct kvm *kvm, int isr); + void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*get_tdp_level)(void); @@ -994,6 +998,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); int kvm_cpu_get_interrupt(struct kvm_vcpu *v); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0a54df0..694586c 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -62,6 +62,7 @@ #define EXIT_REASON_MCE_DURING_VMENTRY 41 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 #define EXIT_REASON_APIC_ACCESS 44 +#define EXIT_REASON_EOI_INDUCED 45 #define EXIT_REASON_EPT_VIOLATION 48 #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_WBINVD 54 @@ -144,6 +145,7 @@ #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 #define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 +#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 @@ -181,6 +183,7 @@ enum vmcs_field { GUEST_GS_SELECTOR = 0x0000080a, GUEST_LDTR_SELECTOR = 0x0000080c, GUEST_TR_SELECTOR = 0x0000080e, + GUEST_INTR_STATUS = 0x00000810, HOST_ES_SELECTOR = 0x00000c00, HOST_CS_SELECTOR = 0x00000c02, HOST_SS_SELECTOR = 0x00000c04, @@ -208,6 +211,14 @@ enum vmcs_field { APIC_ACCESS_ADDR_HIGH = 0x00002015, EPT_POINTER = 0x0000201a, EPT_POINTER_HIGH = 0x0000201b, + EOI_EXIT_BITMAP0 = 0x0000201c, + EOI_EXIT_BITMAP0_HIGH = 0x0000201d, + EOI_EXIT_BITMAP1 = 0x0000201e, + EOI_EXIT_BITMAP1_HIGH = 0x0000201f, + EOI_EXIT_BITMAP2 = 0x00002020, + EOI_EXIT_BITMAP2_HIGH = 0x00002021, + EOI_EXIT_BITMAP3 = 0x00002022, + EOI_EXIT_BITMAP3_HIGH = 0x00002023, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index b111aee..484bc87 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -38,6 +38,38 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 
EXPORT_SYMBOL(kvm_cpu_has_pending_timer); /* + * check if there is pending interrupt from + * non-APIC source without intack. + */ +static int kvm_cpu_has_extint(struct kvm_vcpu *v) +{ + if (kvm_apic_accept_pic_intr(v)) + return pic_irqchip(v->kvm)->output; /* PIC */ + else + return 0; +} + +/* + * check if there is injectable interrupt: + * when virtual interrupt delivery enabled, + * interrupt from apic will handled by hardware, + * we don't need to check it here. + */ +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) +{ + if (!irqchip_in_kernel(v->kvm)) + return v->arch.interrupt.pending; + + if (kvm_cpu_has_extint(v)) + return 1; + + if (kvm_apic_vid_enabled(v->kvm)) + return 0; + + return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ +} + +/* * check if there is pending interrupt without * intack. */ @@ -46,27 +78,41 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v) if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.pending; - if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output) - return pic_irqchip(v->kvm)->output; /* PIC */ + if (kvm_cpu_has_extint(v)) + return 1; return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ } EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); /* + * Read pending interrupt(from non-APIC source) + * vector and intack. + */ +static int kvm_cpu_get_extint(struct kvm_vcpu *v) +{ + if (kvm_cpu_has_extint(v)) + return kvm_pic_read_irq(v->kvm); /* PIC */ + return -1; +} + +/* * Read pending interrupt vector and intack. */ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) { + int vector; + if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.nr; - if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output) - return kvm_pic_read_irq(v->kvm); /* PIC */ + vector = kvm_cpu_get_extint(v); + + if (kvm_apic_vid_enabled(v->kvm) || vector != -1) + return vector; /* PIC */ return kvm_get_apic_interrupt(v); /* APIC */ } -EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index f69fc50..02b51dd 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -145,21 +145,51 @@ static inline int kvm_apic_id(struct kvm_lapic *apic) return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; } -static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr) +void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu, + struct kvm_lapic_irq *irq, + u64 *eoi_exit_bitmap) { - u16 cid; - ldr >>= 32 - map->ldr_bits; - cid = (ldr >> map->cid_shift) & map->cid_mask; + struct kvm_lapic **dst; + struct kvm_apic_map *map; + unsigned long bitmap = 1; + int i; - BUG_ON(cid >= ARRAY_SIZE(map->logical_map)); + rcu_read_lock(); + map = rcu_dereference(vcpu->kvm->arch.apic_map); - return cid; -} + if (unlikely(!map)) { + __set_bit(irq->vector, (unsigned long *)eoi_exit_bitmap); + goto out; + } -static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr) -{ - ldr >>= (32 - map->ldr_bits); - return ldr & map->lid_mask; + if (irq->dest_mode == 0) { /* physical mode */ + if (irq->delivery_mode == APIC_DM_LOWEST || + irq->dest_id == 0xff) { + __set_bit(irq->vector, + (unsigned long *)eoi_exit_bitmap); + goto out; + } + dst = &map->phys_map[irq->dest_id & 0xff]; + } else { + u32 mda = irq->dest_id << (32 - map->ldr_bits); + + dst = map->logical_map[apic_cluster_id(map, mda)]; + + bitmap = apic_logical_id(map, mda); + } + + for_each_set_bit(i, &bitmap, 16) { + if (!dst[i]) + continue; + if (dst[i]->vcpu == vcpu) { + __set_bit(irq->vector, + (unsigned long *)eoi_exit_bitmap); + break; 
+ } + } + +out: + rcu_read_unlock(); } static void recalculate_apic_map(struct kvm *kvm) @@ -225,6 +255,8 @@ out: if (old) kfree_rcu(old, rcu); + + kvm_ioapic_make_eoibitmap_request(kvm); } static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id) @@ -340,6 +372,10 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic) { int result; + /* + * Note that irr_pending is just a hint. It will be always + * true with virtual interrupt delivery enabled. + */ if (!apic->irr_pending) return -1; @@ -456,6 +492,8 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) static inline int apic_find_highest_isr(struct kvm_lapic *apic) { int result; + + /* Note that isr_count is always 1 with vid enabled */ if (!apic->isr_count) return -1; if (likely(apic->highest_isr_cache != -1)) @@ -735,6 +773,19 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; } +static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) +{ + if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && + kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { + int trigger_mode; + if (apic_test_vector(vector, apic->regs + APIC_TMR)) + trigger_mode = IOAPIC_LEVEL_TRIG; + else + trigger_mode = IOAPIC_EDGE_TRIG; + kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); + } +} + static int apic_set_eoi(struct kvm_lapic *apic) { int vector = apic_find_highest_isr(apic); @@ -751,19 +802,26 @@ static int apic_set_eoi(struct kvm_lapic *apic) apic_clear_isr(vector, apic); apic_update_ppr(apic); - if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && - kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { - int trigger_mode; - if (apic_test_vector(vector, apic->regs + APIC_TMR)) - trigger_mode = IOAPIC_LEVEL_TRIG; - else - trigger_mode = IOAPIC_EDGE_TRIG; - kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); - } + kvm_ioapic_send_eoi(apic, vector); kvm_make_request(KVM_REQ_EVENT, apic->vcpu); return vector; } +/* + * this interface assumes a trap-like exit, which has already finished + * desired side effect including vISR and vPPR update. + */ +void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + trace_kvm_eoi(apic, vector); + + kvm_ioapic_send_eoi(apic, vector); + kvm_make_request(KVM_REQ_EVENT, apic->vcpu); +} +EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated); + static void apic_send_ipi(struct kvm_lapic *apic) { u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR); @@ -1375,8 +1433,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); } - apic->irr_pending = false; - apic->isr_count = 0; + apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); + apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); apic->highest_isr_cache = -1; update_divide_count(apic); atomic_set(&apic->lapic_timer.pending, 0); @@ -1591,8 +1649,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; - apic->isr_count = count_vectors(apic->regs + APIC_ISR); + apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? 
+ 1 : count_vectors(apic->regs + APIC_ISR); apic->highest_isr_cache = -1; + kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic)); kvm_make_request(KVM_REQ_EVENT, vcpu); } diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 22a5397..1676d34 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -65,6 +65,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); +void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); @@ -131,4 +132,30 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic) return apic->vcpu->arch.apic_base & X2APIC_ENABLE; } +static inline bool kvm_apic_vid_enabled(struct kvm *kvm) +{ + return kvm_x86_ops->vm_has_apicv(kvm); +} + +static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr) +{ + u16 cid; + ldr >>= 32 - map->ldr_bits; + cid = (ldr >> map->cid_shift) & map->cid_mask; + + BUG_ON(cid >= ARRAY_SIZE(map->logical_map)); + + return cid; +} + +static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr) +{ + ldr >>= (32 - map->ldr_bits); + return ldr & map->lid_mask; +} + +void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu, + struct kvm_lapic_irq *irq, + u64 *eoi_bitmap); + #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 38407e9..e1b1ce2 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3576,6 +3576,21 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) return; } +static int svm_vm_has_apicv(struct kvm *kvm) +{ + return 0; +} + +static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) +{ + return; +} + +static void svm_hwapic_isr_update(struct kvm *kvm, int isr) +{ + return; +} + static int svm_nmi_allowed(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4296,6 +4311,9 @@ static struct kvm_x86_ops svm_x86_ops = { .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, + .vm_has_apicv = svm_vm_has_apicv, + .load_eoi_exitmap = svm_load_eoi_exitmap, + .hwapic_isr_update = svm_hwapic_isr_update, .set_tss_addr = svm_set_tss_addr, .get_tdp_level = get_npt_level, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3ce8a16..0cf74a6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -84,8 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); -static bool __read_mostly enable_apicv_reg = 1; -module_param(enable_apicv_reg, bool, S_IRUGO); +static bool __read_mostly enable_apicv_reg_vid = 1; +module_param(enable_apicv_reg_vid, bool, S_IRUGO); /* * If nested=1, nested virtualization is supported, i.e., guests may use @@ -781,6 +781,12 @@ static inline bool cpu_has_vmx_apic_register_virt(void) SECONDARY_EXEC_APIC_REGISTER_VIRT; } +static inline bool cpu_has_vmx_virtual_intr_delivery(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; +} + static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && @@ -2571,7 +2577,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | - SECONDARY_EXEC_APIC_REGISTER_VIRT; 
+ SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) @@ -2586,7 +2593,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT @@ -2785,8 +2793,14 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_ple()) ple_gap = 0; - if (!cpu_has_vmx_apic_register_virt()) - enable_apicv_reg = 0; + if (!cpu_has_vmx_apic_register_virt() || + !cpu_has_vmx_virtual_intr_delivery()) + enable_apicv_reg_vid = 0; + + if (enable_apicv_reg_vid) + kvm_x86_ops->update_cr8_intercept = NULL; + else + kvm_x86_ops->hwapic_irr_update = NULL; if (nested) nested_vmx_setup_ctls_msrs(); @@ -3928,6 +3942,11 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx) return exec_control; } +static int vmx_vm_has_apicv(struct kvm *kvm) +{ + return enable_apicv_reg_vid && irqchip_in_kernel(kvm); +} + static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; @@ -3945,8 +3964,9 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; - if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm)) - exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT; + if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) + exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; return exec_control; } @@ -3992,6 +4012,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmx_secondary_exec_control(vmx)); } + if (enable_apicv_reg_vid) { + vmcs_write64(EOI_EXIT_BITMAP0, 0); + vmcs_write64(EOI_EXIT_BITMAP1, 0); + vmcs_write64(EOI_EXIT_BITMAP2, 0); + vmcs_write64(EOI_EXIT_BITMAP3, 0); + + vmcs_write16(GUEST_INTR_STATUS, 0); + } + if (ple_gap) { vmcs_write32(PLE_GAP, ple_gap); vmcs_write32(PLE_WINDOW, ple_window); @@ -4906,6 +4935,16 @@ static int handle_apic_access(struct kvm_vcpu *vcpu) return emulate_instruction(vcpu, 0) == EMULATE_DONE; } +static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + int vector = exit_qualification & 0xff; + + /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ + kvm_apic_set_eoi_accelerated(vcpu, vector); + return 1; +} + static int handle_apic_write(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); @@ -5851,6 +5890,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, + [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, @@ -6208,7 +6248,8 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) * There is not point to enable virtualize x2apic without enable * apicv */ - if 
(!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg) + if (!cpu_has_vmx_virtualize_x2apic_mode() || + !vmx_vm_has_apicv(vcpu->kvm)) return; if (!vm_need_tpr_shadow(vcpu->kvm)) @@ -6228,6 +6269,56 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) vmx_set_msr_bitmap(vcpu); } +static void vmx_hwapic_isr_update(struct kvm *kvm, int isr) +{ + u16 status; + u8 old; + + if (!vmx_vm_has_apicv(kvm)) + return; + + if (isr == -1) + isr = 0; + + status = vmcs_read16(GUEST_INTR_STATUS); + old = status >> 8; + if (isr != old) { + status &= 0xff; + status |= isr << 8; + vmcs_write16(GUEST_INTR_STATUS, status); + } +} + +static void vmx_set_rvi(int vector) +{ + u16 status; + u8 old; + + status = vmcs_read16(GUEST_INTR_STATUS); + old = (u8)status & 0xff; + if ((u8)vector != old) { + status &= ~0xff; + status |= (u8)vector; + vmcs_write16(GUEST_INTR_STATUS, status); + } +} + +static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) +{ + if (max_irr == -1) + return; + + vmx_set_rvi(max_irr); +} + +static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) +{ + vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); + vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); + vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); + vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); +} + static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info; @@ -7492,6 +7583,10 @@ static struct kvm_x86_ops vmx_x86_ops = { .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, + .vm_has_apicv = vmx_vm_has_apicv, + .load_eoi_exitmap = vmx_load_eoi_exitmap, + .hwapic_irr_update = vmx_hwapic_irr_update, + .hwapic_isr_update = vmx_hwapic_isr_update, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, @@ -7594,7 +7689,7 @@ static int __init vmx_init(void) memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); - if (enable_apicv_reg) { + if (enable_apicv_reg_vid) { for (msr = 0x800; msr <= 0x8ff; msr++) vmx_disable_intercept_msr_read_x2apic(msr); @@ -7606,6 +7701,10 @@ static int __init vmx_init(void) vmx_enable_intercept_msr_read_x2apic(0x839); /* TPR */ vmx_disable_intercept_msr_write_x2apic(0x808); + /* EOI */ + vmx_disable_intercept_msr_write_x2apic(0x80b); + /* SELF-IPI */ + vmx_disable_intercept_msr_write_x2apic(0x83f); } if (enable_ept) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b9f5529..cf512e70 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5565,7 +5565,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu) vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } - } else if (kvm_cpu_has_interrupt(vcpu)) { + } else if (kvm_cpu_has_injectable_intr(vcpu)) { if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); @@ -5633,6 +5633,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) #endif } +static void update_eoi_exitmap(struct kvm_vcpu *vcpu) +{ + u64 eoi_exit_bitmap[4]; + + memset(eoi_exit_bitmap, 0, 32); + + kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap); + kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); +} + static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; @@ -5686,6 +5696,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_handle_pmu_event(vcpu); if (kvm_check_request(KVM_REQ_PMI, vcpu)) kvm_deliver_pmi(vcpu); + if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu)) + update_eoi_exitmap(vcpu); } 
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { @@ -5694,10 +5706,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) /* enable NMI/IRQ window open exits if needed */ if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); - else if (kvm_cpu_has_interrupt(vcpu) || req_int_win) + else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { + /* + * Update architecture specific hints for APIC + * virtual interrupt delivery. + */ + if (kvm_x86_ops->hwapic_irr_update) + kvm_x86_ops->hwapic_irr_update(vcpu, + kvm_lapic_find_highest_irr(vcpu)); update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 4dd7d75..0350e0d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -123,6 +123,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_MASTERCLOCK_UPDATE 19 #define KVM_REQ_MCLOCK_INPROGRESS 20 #define KVM_REQ_EPR_EXIT 21 +#define KVM_REQ_EOIBITMAP 22 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -538,6 +539,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); void kvm_make_mclock_inprogress_request(struct kvm *kvm); +void kvm_make_update_eoibitmap_request(struct kvm *kvm); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -691,6 +693,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level); int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, int irq_source_id, int level); +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index f3abbef..ce82b94 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic) smp_wmb(); } +void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu, + u64 *eoi_exit_bitmap) +{ + struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; + union kvm_ioapic_redirect_entry *e; + struct kvm_lapic_irq irqe; + int index; + + spin_lock(&ioapic->lock); + /* traverse ioapic entry to set eoi exit bitmap*/ + for (index = 0; index < IOAPIC_NUM_PINS; index++) { + e = &ioapic->redirtbl[index]; + if (!e->fields.mask && + (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || + kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, + index))) { + irqe.dest_id = e->fields.dest_id; + irqe.vector = e->fields.vector; + irqe.dest_mode = e->fields.dest_mode; + irqe.delivery_mode = e->fields.delivery_mode << 8; + kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap); + } + } + spin_unlock(&ioapic->lock); +} +EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap); + +void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm) +{ + struct kvm_ioapic *ioapic = kvm->arch.vioapic; + + if (!kvm_apic_vid_enabled(kvm) || !ioapic) + return; + kvm_make_update_eoibitmap_request(kvm); +} + static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; @@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct 
kvm_ioapic *ioapic, u32 val) if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG && ioapic->irr & (1 << index)) ioapic_service(ioapic, index); + kvm_ioapic_make_eoibitmap_request(ioapic->kvm); break; } } @@ -455,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) spin_lock(&ioapic->lock); memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); update_handled_vectors(ioapic); + kvm_ioapic_make_eoibitmap_request(kvm); spin_unlock(&ioapic->lock); return 0; } diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h index a30abfe..0400a46 100644 --- a/virt/kvm/ioapic.h +++ b/virt/kvm/ioapic.h @@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq); int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); +void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm); +void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu, + u64 *eoi_exit_bitmap); + #endif diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 656fa45..ff6d40e 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -237,6 +238,28 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level) return ret; } +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) +{ + struct kvm_irq_ack_notifier *kian; + struct hlist_node *n; + int gsi; + + rcu_read_lock(); + gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; + if (gsi != -1) + hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, + link) + if (kian->gsi == gsi) { + rcu_read_unlock(); + return true; + } + + rcu_read_unlock(); + + return false; +} +EXPORT_SYMBOL_GPL(kvm_irq_has_notifier); + void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_ack_notifier *kian; @@ -261,6 +284,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm, mutex_lock(&kvm->irq_lock); hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); mutex_unlock(&kvm->irq_lock); + kvm_ioapic_make_eoibitmap_request(kvm); } void kvm_unregister_irq_ack_notifier(struct kvm *kvm, @@ -270,6 +294,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, hlist_del_init_rcu(&kian->link); mutex_unlock(&kvm->irq_lock); synchronize_rcu(); + kvm_ioapic_make_eoibitmap_request(kvm); } int kvm_request_irq_source_id(struct kvm *kvm) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3fec2cd..abc23e2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm) make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); } +void kvm_make_update_eoibitmap_request(struct kvm *kvm) +{ + make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP); +} + int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) { struct page *page; -- cgit v0.10.2 From 7b270f609982f68f2433442bf167f735e7364b06 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 22 Jan 2013 13:09:13 +0530 Subject: sched: Bail out of yield_to when source and target runqueue has one task In case of undercomitted scenarios, especially in large guests yield_to overhead is significantly high. when run queue length of source and target is one, take an opportunity to bail out and return -ESRCH. This return condition can be further exploited to quickly come out of PLE handler. 
(History: Raghavendra initially worked on break out of kvm ple handler upon seeing source runqueue length = 1, but it had to export rq length). Peter came up with the elegant idea of return -ESRCH in scheduler core. Signed-off-by: Peter Zijlstra Raghavendra, Checking the rq length of target vcpu condition added.(thanks Avi) Reviewed-by: Srikar Dronamraju Signed-off-by: Raghavendra K T Acked-by: Andrew Jones Tested-by: Chegu Vinod Signed-off-by: Gleb Natapov diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0533496..01edad9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4316,7 +4316,10 @@ EXPORT_SYMBOL(yield); * It's the caller's job to ensure that the target task struct * can't go away on us before we can do any checks. * - * Returns true if we indeed boosted the target task. + * Returns: + * true (>0) if we indeed boosted the target task. + * false (0) if we failed to boost the target. + * -ESRCH if there's no task to yield to. */ bool __sched yield_to(struct task_struct *p, bool preempt) { @@ -4330,6 +4333,15 @@ bool __sched yield_to(struct task_struct *p, bool preempt) again: p_rq = task_rq(p); + /* + * If we're the only runnable task on the rq and target rq also + * has only one task, there's absolutely no point in yielding. + */ + if (rq->nr_running == 1 && p_rq->nr_running == 1) { + yielded = -ESRCH; + goto out_irq; + } + double_rq_lock(rq, p_rq); while (task_rq(p) != p_rq) { double_rq_unlock(rq, p_rq); @@ -4337,13 +4349,13 @@ again: } if (!curr->sched_class->yield_to_task) - goto out; + goto out_unlock; if (curr->sched_class != p->sched_class) - goto out; + goto out_unlock; if (task_running(p_rq, p) || p->state) - goto out; + goto out_unlock; yielded = curr->sched_class->yield_to_task(rq, p, preempt); if (yielded) { @@ -4356,11 +4368,12 @@ again: resched_task(p_rq->curr); } -out: +out_unlock: double_rq_unlock(rq, p_rq); +out_irq: local_irq_restore(flags); - if (yielded) + if (yielded > 0) schedule(); return yielded; -- cgit v0.10.2 From c45c528e899094b9049b3c900e2cf1f00aa0490c Mon Sep 17 00:00:00 2001 From: Raghavendra K T Date: Tue, 22 Jan 2013 13:09:24 +0530 Subject: kvm: Handle yield_to failure return code for potential undercommit case yield_to returns -ESRCH, When source and target of yield_to run queue length is one. When we see three successive failures of yield_to we assume we are in potential undercommit case and abort from PLE handler. The assumption is backed by low probability of wrong decision for even worst case scenarios such as average runqueue length between 1 and 2. More detail on rationale behind using three tries: if p is the probability of finding rq length one on a particular cpu, and if we do n tries, then probability of exiting ple handler is: p^(n+1) [ because we would have come across one source with rq length 1 and n target cpu rqs with length 1 ] so num tries: probability of aborting ple handler (1.5x overcommit) 1 1/4 2 1/8 3 1/16 We can increase this probability with more tries, but the problem is the overhead. Also, If we have tried three times that means we would have iterated over 3 good eligible vcpus along with many non-eligible candidates. In worst case if we iterate all the vcpus, we reduce 1x performance and overcommit performance get hit. note that we do not update last boosted vcpu in failure cases. Thank Avi for raising question on aborting after first fail from yield_to. 
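A minimal sketch of that retry policy, assuming a hypothetical yield_to_vcpu() helper that mirrors yield_to()'s return convention (>0 boosted the target, 0 failed to boost, <0 no task worth yielding to); this is an illustration of the abort logic only, not the kernel's directed-yield code.

    /* Hypothetical helper, assumed to follow yield_to()'s return convention. */
    extern int yield_to_vcpu(int candidate);

    static void spin_loop_boost(int ncandidates)
    {
            int tries = 3;                  /* tolerate three undercommit hints */
            int i;

            for (i = 0; i < ncandidates && tries; i++) {
                    int r = yield_to_vcpu(i);

                    if (r > 0)
                            break;          /* boosted a likely lock holder: done */
                    if (r < 0)
                            tries--;        /* -ESRCH-style result: likely undercommit */
                    /* r == 0: candidate not eligible, keep scanning */
            }
    }

With p = 1/2 (roughly 1.5x overcommit in the table above), aborting after three consecutive negative results happens with probability p^4 = 1/16, matching the figures quoted in the message.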
Reviewed-by: Srikar Dronamraju Signed-off-by: Raghavendra K T Tested-by: Chegu Vinod Signed-off-by: Gleb Natapov diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index abc23e2..a83ca63 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1694,6 +1694,7 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target) { struct pid *pid; struct task_struct *task = NULL; + bool ret = false; rcu_read_lock(); pid = rcu_dereference(target->pid); @@ -1701,17 +1702,15 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target) task = get_pid_task(target->pid, PIDTYPE_PID); rcu_read_unlock(); if (!task) - return false; + return ret; if (task->flags & PF_VCPU) { put_task_struct(task); - return false; - } - if (yield_to(task, 1)) { - put_task_struct(task); - return true; + return ret; } + ret = yield_to(task, 1); put_task_struct(task); - return false; + + return ret; } EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); @@ -1752,12 +1751,14 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) return eligible; } #endif + void kvm_vcpu_on_spin(struct kvm_vcpu *me) { struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; int yielded = 0; + int try = 3; int pass; int i; @@ -1769,7 +1770,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me) * VCPU is holding the lock that we need and will release it. * We approximate round-robin by starting at the last boosted VCPU. */ - for (pass = 0; pass < 2 && !yielded; pass++) { + for (pass = 0; pass < 2 && !yielded && try; pass++) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!pass && i <= last_boosted_vcpu) { i = last_boosted_vcpu; @@ -1782,10 +1783,15 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; - if (kvm_vcpu_yield_to(vcpu)) { + + yielded = kvm_vcpu_yield_to(vcpu); + if (yielded > 0) { kvm->last_boosted_vcpu = i; - yielded = 1; break; + } else if (yielded < 0) { + try--; + if (!try) + break; } } } -- cgit v0.10.2 From 15bc8d8457875f495c59d933b05770ba88d1eacb Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 25 Jan 2013 15:34:15 +0100 Subject: s390/kvm: Fix store status for ACRS/FPRS On store status we need to copy the current state of registers into a save area. Currently we might save stale versions: The sie state descriptor doesnt have fields for guest ACRS,FPRS, those registers are simply stored in the host registers. The host program must copy these away if needed. We do that in vcpu_put/load. If we now do a store status in KVM code between vcpu_put/load, the saved values are not up-to-date. Lets collect the ACRS/FPRS before saving them. This also fixes some strange problems with hotplug and virtio-ccw, since the low level machine check handler (on hotplug a machine check will happen) will revalidate all registers with the content of the save area. Signed-off-by: Christian Borntraeger CC: stable@vger.kernel.org Signed-off-by: Gleb Natapov diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5b01f09..4377d18 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -770,6 +770,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) } else prefix = 0; + /* + * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy + * copying in vcpu load/put. 
Lets update our copies before we save + * it into the save area + */ + save_fp_regs(&vcpu->arch.guest_fpregs); + save_access_regs(vcpu->run->s.regs.acrs); + if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; -- cgit v0.10.2 From c98d3683ce675d689121147ed00e7c4af4737518 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 25 Jan 2013 15:34:16 +0100 Subject: s390/virtio-ccw: Fix setup_vq error handling. virtio_ccw_setup_vq() failed to unwind correctly on errors. In particular, it failed to delete the virtqueue on errors, leading to list corruption when virtio_ccw_del_vqs() iterated over a virtqueue that had not been added to the vcdev's list. Fix this with redoing the error unwinding in virtio_ccw_setup_vq(), using a single path for all errors. Signed-off-by: Cornelia Huck Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger Signed-off-by: Gleb Natapov diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c index 2edd94a..3217dfe 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/kvm/virtio_ccw.c @@ -244,9 +244,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, { struct virtio_ccw_device *vcdev = to_vc_device(vdev); int err; - struct virtqueue *vq; + struct virtqueue *vq = NULL; struct virtio_ccw_vq_info *info; - unsigned long size; + unsigned long size = 0; /* silence the compiler */ unsigned long flags; /* Allocate queue. */ @@ -279,11 +279,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, /* For now, we fail if we can't get the requested size. */ dev_warn(&vcdev->cdev->dev, "no vq\n"); err = -ENOMEM; - free_pages_exact(info->queue, size); goto out_err; } - info->vq = vq; - vq->priv = info; /* Register it with the host. */ info->info_block->queue = (__u64)info->queue; @@ -297,12 +294,12 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i); if (err) { dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); - free_pages_exact(info->queue, size); - info->vq = NULL; - vq->priv = NULL; goto out_err; } + info->vq = vq; + vq->priv = info; + /* Save it to our list. */ spin_lock_irqsave(&vcdev->lock, flags); list_add(&info->node, &vcdev->virtqueues); @@ -311,8 +308,13 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, return vq; out_err: - if (info) + if (vq) + vring_del_virtqueue(vq); + if (info) { + if (info->queue) + free_pages_exact(info->queue, size); kfree(info->info_block); + } kfree(info); return ERR_PTR(err); } -- cgit v0.10.2 From 0c29b2293b25527a1759cbd22b106f19258636b6 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 25 Jan 2013 15:34:17 +0100 Subject: s390/kvm: Fix instruction decoding Instructions with long displacement have a signed displacement. Currently the sign bit is interpreted as 2^20: Lets fix it by doing the sign extension from 20bit to 32bit and then use it as a signed variable in the addition (see kvm_s390_get_base_disp_rsy). Furthermore, there are lots of "int" in that code. This is problematic, because shifting on a signed integer is undefined/implementation defined if the bit value happens to be negative. Fortunately the promotion rules will make the right hand side unsigned anyway, so there is no real problem right now. Let's convert them anyway to unsigned where appropriate to avoid problems if the code is changed or copy/pasted later on. 
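A standalone example of the sign extension this fixes (plain C, not the kernel helper): a 20-bit displacement with bit 19 set must have its sign propagated into the upper bits before it is added to the 64-bit base, otherwise it is misread as a large positive offset.

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend a 20-bit two's-complement value to 64 bits. */
    static int64_t sext20(uint32_t disp)
    {
            if (disp & 0x80000)       /* bit 19 set: negative displacement */
                    disp |= 0xfff00000; /* propagate the sign into bits 20..31 */
            return (int64_t)(int32_t)disp;
    }

    int main(void)
    {
            printf("%lld\n", (long long)sext20(0x00001)); /*       1 */
            printf("%lld\n", (long long)sext20(0xfffff)); /*      -1 */
            printf("%lld\n", (long long)sext20(0x80000)); /* -524288 */
            return 0;
    }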
Signed-off-by: Christian Borntraeger Reviewed-by: Cornelia Huck Signed-off-by: Gleb Natapov diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 3e05def..4d89d64 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -67,8 +67,8 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + u32 base2 = vcpu->arch.sie_block->ipb >> 28; + u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; } @@ -76,10 +76,10 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, u64 *address1, u64 *address2) { - int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; - int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; - int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; - int disp2 = vcpu->arch.sie_block->ipb & 0x0fff; + u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; + u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; + u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; + u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff; *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; @@ -87,17 +87,20 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + + u32 base2 = vcpu->arch.sie_block->ipb >> 28; + u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + ((vcpu->arch.sie_block->ipb & 0xff00) << 4); + /* The displacement is a 20bit _SIGNED_ value */ + if (disp2 & 0x80000) + disp2+=0xfff00000; - return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; } static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) { - int base2 = vcpu->arch.sie_block->ipb >> 28; - int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + u32 base2 = vcpu->arch.sie_block->ipb >> 28; + u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; } -- cgit v0.10.2 From f64c0398939483eb1d8951f24fbc21e94ed54457 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 29 Jan 2013 11:00:07 +0900 Subject: KVM: set_memory_region: Identify the requested change explicitly KVM_SET_USER_MEMORY_REGION forces __kvm_set_memory_region() to identify what kind of change is being requested by checking the arguments. The current code does this checking at various points in code and each condition being used there is not easy to understand at first glance. This patch consolidates these checks and introduces an enum to name the possible changes to clean up the code. Although this does not introduce any functional changes, there is one change which optimizes the code a bit: if we have nothing to change, the new code returns 0 immediately. Note that the return value for this case cannot be changed since QEMU relies on it: we noticed this when we changed it to -EINVAL and got a section mismatch error at the final stage of live migration. 
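A compact sketch of that classification, using simplified stand-in fields rather than the real memslot structures; the extra enumerators for the no-op and rejected cases are only there to make the sketch self-contained.

    /* Simplified stand-ins for the memslot fields the check looks at. */
    enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY, MR_NOOP, MR_INVALID };

    struct slot { unsigned long npages, base_gfn, userspace_addr, flags; };

    static enum mr_change classify(const struct slot *old, const struct slot *new)
    {
            if (!new->npages)
                    return old->npages ? MR_DELETE : MR_INVALID; /* nothing to delete */
            if (!old->npages)
                    return MR_CREATE;
            if (new->userspace_addr != old->userspace_addr || new->npages != old->npages)
                    return MR_INVALID;                    /* size/addr changes rejected */
            if (new->base_gfn != old->base_gfn)
                    return MR_MOVE;
            if (new->flags != old->flags)
                    return MR_FLAGS_ONLY;
            return MR_NOOP;                               /* caller just returns 0 */
    }

Reducing the request to one explicit case up front is what lets the rest of __kvm_set_memory_region() branch on a single value instead of re-deriving the intent at each step.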
Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a83ca63..64c5dc3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -719,6 +719,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm, } /* + * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: + * - create a new memory slot + * - delete an existing memory slot + * - modify an existing memory slot + * -- move it in the guest physical memory space + * -- just change its flags + * + * Since flags can be changed by some of these operations, the following + * differentiation is the best we can do for __kvm_set_memory_region(): + */ +enum kvm_mr_change { + KVM_MR_CREATE, + KVM_MR_DELETE, + KVM_MR_MOVE, + KVM_MR_FLAGS_ONLY, +}; + +/* * Allocate some memory and give it an address in the guest physical address * space. * @@ -737,6 +755,7 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_memory_slot old, new; struct kvm_memslots *slots = NULL, *old_memslots; bool old_iommu_mapped; + enum kvm_mr_change change; r = check_memory_region_flags(mem); if (r) @@ -780,17 +799,30 @@ int __kvm_set_memory_region(struct kvm *kvm, old_iommu_mapped = old.npages; - /* - * Disallow changing a memory slot's size or changing anything about - * zero sized slots that doesn't involve making them non-zero. - */ r = -EINVAL; - if (npages && old.npages && npages != old.npages) - goto out; - if (!npages && !old.npages) + if (npages) { + if (!old.npages) + change = KVM_MR_CREATE; + else { /* Modify an existing slot. */ + if ((mem->userspace_addr != old.userspace_addr) || + (npages != old.npages)) + goto out; + + if (base_gfn != old.base_gfn) + change = KVM_MR_MOVE; + else if (new.flags != old.flags) + change = KVM_MR_FLAGS_ONLY; + else { /* Nothing to change. */ + r = 0; + goto out; + } + } + } else if (old.npages) { + change = KVM_MR_DELETE; + } else /* Modify a non-existent slot: disallowed. */ goto out; - if ((npages && !old.npages) || (base_gfn != old.base_gfn)) { + if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { /* Check for overlaps */ r = -EEXIST; kvm_for_each_memslot(slot, kvm->memslots) { @@ -808,20 +840,12 @@ int __kvm_set_memory_region(struct kvm *kvm, new.dirty_bitmap = NULL; r = -ENOMEM; - - /* - * Allocate if a slot is being created. If modifying a slot, - * the userspace_addr cannot change. - */ - if (!old.npages) { + if (change == KVM_MR_CREATE) { new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; if (kvm_arch_create_memslot(&new, npages)) goto out_free; - } else if (npages && mem->userspace_addr != old.userspace_addr) { - r = -EINVAL; - goto out_free; } /* Allocate page dirty bitmap if needed */ @@ -830,7 +854,7 @@ int __kvm_set_memory_region(struct kvm *kvm, goto out_free; } - if (!npages || base_gfn != old.base_gfn) { + if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); @@ -881,7 +905,7 @@ int __kvm_set_memory_region(struct kvm *kvm, * slots (size changes, userspace addr changes) is disallowed above, * so any other attribute changes getting here can be skipped. 
*/ - if (npages) { + if (change != KVM_MR_DELETE) { if (old_iommu_mapped && ((new.flags ^ old.flags) & KVM_MEM_READONLY)) { kvm_iommu_unmap_pages(kvm, &old); @@ -896,7 +920,7 @@ int __kvm_set_memory_region(struct kvm *kvm, } /* actual memory is freed via old in kvm_free_physmem_slot below */ - if (!npages) { + if (change == KVM_MR_DELETE) { new.dirty_bitmap = NULL; memset(&new.arch, 0, sizeof(new.arch)); } -- cgit v0.10.2 From 75d61fbcf563373696578570e914f555e12c8d97 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Wed, 30 Jan 2013 19:40:41 +0900 Subject: KVM: set_memory_region: Disallow changing read-only attribute later As Xiao pointed out, there are a few problems with it: - kvm_arch_commit_memory_region() write protects the memory slot only for GET_DIRTY_LOG when modifying the flags. - FNAME(sync_page) uses the old spte value to set a new one without checking KVM_MEM_READONLY flag. Since we flush all shadow pages when creating a new slot, the simplest fix is to disallow such problematic flag changes: this is safe because no one is doing such things. Reviewed-by: Gleb Natapov Signed-off-by: Takuya Yoshikawa Cc: Xiao Guangrong Cc: Alex Williamson Signed-off-by: Marcelo Tosatti diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 09905cb..0e03b19 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -874,12 +874,12 @@ It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr be identical. This allows large pages in the guest to be backed by large pages in the host. -The flags field supports two flag, KVM_MEM_LOG_DIRTY_PAGES, which instructs -kvm to keep track of writes to memory within the slot. See KVM_GET_DIRTY_LOG -ioctl. The KVM_CAP_READONLY_MEM capability indicates the availability of the -KVM_MEM_READONLY flag. When this flag is set for a memory region, KVM only -allows read accesses. Writes will be posted to userspace as KVM_EXIT_MMIO -exits. +The flags field supports two flags: KVM_MEM_LOG_DIRTY_PAGES and +KVM_MEM_READONLY. The former can be set to instruct KVM to keep track of +writes to memory within the slot. See KVM_GET_DIRTY_LOG ioctl to know how to +use it. The latter can be set, if KVM_CAP_READONLY_MEM capability allows it, +to make a new slot read-only. In this case, writes to this memory will be +posted to userspace as KVM_EXIT_MMIO exits. When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of the memory region are automatically reflected into the guest. For example, an diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 64c5dc3..2e93630 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -754,7 +754,6 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_memory_slot *slot; struct kvm_memory_slot old, new; struct kvm_memslots *slots = NULL, *old_memslots; - bool old_iommu_mapped; enum kvm_mr_change change; r = check_memory_region_flags(mem); @@ -797,15 +796,14 @@ int __kvm_set_memory_region(struct kvm *kvm, new.npages = npages; new.flags = mem->flags; - old_iommu_mapped = old.npages; - r = -EINVAL; if (npages) { if (!old.npages) change = KVM_MR_CREATE; else { /* Modify an existing slot. 
*/ if ((mem->userspace_addr != old.userspace_addr) || - (npages != old.npages)) + (npages != old.npages) || + ((new.flags ^ old.flags) & KVM_MEM_READONLY)) goto out; if (base_gfn != old.base_gfn) @@ -867,7 +865,6 @@ int __kvm_set_memory_region(struct kvm *kvm, /* slot was deleted or moved, clear iommu mapping */ kvm_iommu_unmap_pages(kvm, &old); - old_iommu_mapped = false; /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. * @@ -898,25 +895,17 @@ int __kvm_set_memory_region(struct kvm *kvm, /* * IOMMU mapping: New slots need to be mapped. Old slots need to be - * un-mapped and re-mapped if their base changes or if flags that the - * iommu cares about change (read-only). Base change unmapping is - * handled above with slot deletion, so we only unmap incompatible - * flags here. Anything else the iommu might care about for existing - * slots (size changes, userspace addr changes) is disallowed above, - * so any other attribute changes getting here can be skipped. + * un-mapped and re-mapped if their base changes. Since base change + * unmapping is handled above with slot deletion, mapping alone is + * needed here. Anything else the iommu might care about for existing + * slots (size changes, userspace addr changes and read-only flag + * changes) is disallowed above, so any other attribute changes getting + * here can be skipped. */ - if (change != KVM_MR_DELETE) { - if (old_iommu_mapped && - ((new.flags ^ old.flags) & KVM_MEM_READONLY)) { - kvm_iommu_unmap_pages(kvm, &old); - old_iommu_mapped = false; - } - - if (!old_iommu_mapped) { - r = kvm_iommu_map_pages(kvm, &new); - if (r) - goto out_slots; - } + if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { + r = kvm_iommu_map_pages(kvm, &new); + if (r) + goto out_slots; } /* actual memory is freed via old in kvm_free_physmem_slot below */ -- cgit v0.10.2 From feb3eb704a86d97edb296502e95da42d622dac61 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 30 Jan 2013 16:45:00 +0200 Subject: KVM: MMU: make spte_is_locklessly_modifiable() more clear spte_is_locklessly_modifiable() checks that both SPTE_HOST_WRITEABLE and SPTE_MMU_WRITEABLE are present on spte. Make it more explicit. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 9f628f7..2fa82b0 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -448,7 +448,8 @@ static bool __check_direct_spte_mmio_pf(u64 spte) static bool spte_is_locklessly_modifiable(u64 spte) { - return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)); + return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == + (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE); } static bool spte_has_volatile_bits(u64 spte) -- cgit v0.10.2 From 9bb4f6b15ec038ab9afcf346aa6a590406ad6c17 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 30 Jan 2013 16:45:01 +0200 Subject: KVM: MMU: drop unneeded checks. 
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2fa82b0..40737b3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2328,9 +2328,8 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, if (s->role.level != PT_PAGE_TABLE_LEVEL) return 1; - if (!need_unsync && !s->unsync) { + if (!s->unsync) need_unsync = true; - } } if (need_unsync) kvm_unsync_pages(vcpu, gfn); @@ -4008,7 +4007,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, !((sp->role.word ^ vcpu->arch.mmu.base_role.word) & mask.word) && rmap_can_add(vcpu)) mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); - if (!remote_flush && need_remote_flush(entry, *spte)) + if (need_remote_flush(entry, *spte)) remote_flush = true; ++spte; } -- cgit v0.10.2 From 2c9afa52ef081334925905d6370d36b6602c328c Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 30 Jan 2013 16:45:02 +0200 Subject: KVM: MMU: set base_role.nxe during mmu initialization. Move base_role.nxe initialisation to where all other roles are initialized. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 40737b3..8028ac6 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3687,6 +3687,7 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) else r = paging32_init_context(vcpu, context); + vcpu->arch.mmu.base_role.nxe = is_nx(vcpu); vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); vcpu->arch.mmu.base_role.smep_andnot_wp diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cf512e70..373e17a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -870,8 +870,6 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer) kvm_x86_ops->set_efer(vcpu, efer); - vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; - /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); -- cgit v0.10.2 From 116eb3d30e7e121bfc6117ac037833865dad4971 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 30 Jan 2013 16:45:03 +0200 Subject: KVM: MMU: drop superfluous min() call. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8028ac6..42ba85c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3854,7 +3854,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ *gpa &= ~(gpa_t)7; *bytes = 8; - r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8)); + r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8); if (r) gentry = 0; new = (const u8 *)&gentry; -- cgit v0.10.2 From eb3fce87ccc5d38b1ad340f32e34abc09911fb83 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 30 Jan 2013 16:45:04 +0200 Subject: KVM: MMU: drop superfluous is_present_gpte() check. Gust page walker puts only present ptes into ptes[] array. No need to check it again. 
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index ca69dcc..34c5c99 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -409,9 +409,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, unsigned direct_access, access = gw->pt_access; int top_level, emulate = 0; - if (!is_present_gpte(gw->ptes[gw->level - 1])) - return 0; - direct_access = gw->pte_access; top_level = vcpu->arch.mmu.root_level; -- cgit v0.10.2 From 834be0d83f9451573e6fadb381fe0714211c7e90 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 30 Jan 2013 16:45:05 +0200 Subject: Revert "KVM: MMU: split kvm_mmu_free_page" This reverts commit bd4c86eaa6ff10abc4e00d0f45d2a28b10b09df4. There is not user for kvm_mmu_isolate_page() any more. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 42ba85c..0242a8a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1461,28 +1461,14 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) percpu_counter_add(&kvm_total_used_mmu_pages, nr); } -/* - * Remove the sp from shadow page cache, after call it, - * we can not find this sp from the cache, and the shadow - * page table is still valid. - * It should be under the protection of mmu lock. - */ -static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp) +static void kvm_mmu_free_page(struct kvm_mmu_page *sp) { ASSERT(is_empty_shadow_page(sp->spt)); hlist_del(&sp->hash_link); - if (!sp->role.direct) - free_page((unsigned long)sp->gfns); -} - -/* - * Free the shadow page table and the sp, we can do it - * out of the protection of mmu lock. - */ -static void kvm_mmu_free_page(struct kvm_mmu_page *sp) -{ list_del(&sp->link); free_page((unsigned long)sp->spt); + if (!sp->role.direct) + free_page((unsigned long)sp->gfns); kmem_cache_free(mmu_page_header_cache, sp); } @@ -2126,7 +2112,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, do { sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); WARN_ON(!sp->role.invalid || sp->root_count); - kvm_mmu_isolate_page(sp); kvm_mmu_free_page(sp); } while (!list_empty(invalid_list)); } -- cgit v0.10.2 From 4293b5e5a68074431cafa74d549c1327ba1d0deb Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Thu, 31 Jan 2013 12:06:08 -0800 Subject: KVM: Remove duplicate text in api.txt Signed-off-by: Geoff Levand Signed-off-by: Marcelo Tosatti diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 0e03b19..c2534c3 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -219,19 +219,6 @@ allocation of vcpu ids. For example, if userspace wants single-threaded guest vcpus, it should make all vcpu ids be a multiple of the number of vcpus per vcore. -On powerpc using book3s_hv mode, the vcpus are mapped onto virtual -threads in one or more virtual CPU cores. (This is because the -hardware requires all the hardware threads in a CPU core to be in the -same partition.) The KVM_CAP_PPC_SMT capability indicates the number -of vcpus per virtual core (vcore). The vcore id is obtained by -dividing the vcpu id by the number of vcpus per vcore. The vcpus in a -given vcore will always be in the same physical core as each other -(though that might be a different physical core from time to time). -Userspace can control the threading (SMT) mode of the guest by its -allocation of vcpu ids. 
For example, if userspace wants -single-threaded guest vcpus, it should make all vcpu ids be a multiple -of the number of vcpus per vcore. - For virtual cpus that have been created with S390 user controlled virtual machines, the resulting vcpu fd can be memory mapped at page offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual -- cgit v0.10.2 From c08800a56cb8622bb61577abb4a120c6fdc4b9be Mon Sep 17 00:00:00 2001 From: Dongxiao Xu Date: Mon, 4 Feb 2013 11:50:43 +0800 Subject: KVM: VMX: disable SMEP feature when guest is in non-paging mode SMEP is disabled if CPU is in non-paging mode in hardware. However KVM always uses paging mode to emulate guest non-paging mode with TDP. To emulate this behavior, SMEP needs to be manually disabled when guest switches to non-paging mode. We hit an issue where an SMP Linux guest with a recent kernel (with SMEP support enabled, for example 3.5.3) would crash with a triple fault when unrestricted_guest=0. This is because KVM uses an identity mapping page table to emulate the non-paging mode, where the page table is set with the USER flag. If SMEP is still enabled in this case, the guest will hit an unhandleable page fault and then crash. Reviewed-by: Gleb Natapov Reviewed-by: Paolo Bonzini Signed-off-by: Dongxiao Xu Signed-off-by: Xiantao Zhang Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0cf74a6..fe9a9cf 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3227,6 +3227,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; hw_cr4 |= X86_CR4_PSE; + /* + * SMEP is disabled if CPU is in non-paging mode in + * hardware. However KVM always uses paging mode to + * emulate guest non-paging mode with TDP. + * To emulate this behavior, SMEP needs to be manually + * disabled when guest switches to non-paging mode. + */ + hw_cr4 &= ~X86_CR4_SMEP; } else if (!(cr4 & X86_CR4_PAE)) { hw_cr4 &= ~X86_CR4_PAE; } -- cgit v0.10.2 From b0da5bec30eca7ffbb2c89afa6fe503fd418d3a6 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 3 Feb 2013 18:17:17 +0200 Subject: KVM: VMX: add missing exit names to VMX_EXIT_REASONS array Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 694586c..5c9dbad 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -105,7 +105,12 @@ { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ - { EXIT_REASON_WBINVD, "WBINVD" } + { EXIT_REASON_WBINVD, "WBINVD" }, \ { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ { EXIT_REASON_INVD, "INVD" }, \ { EXIT_REASON_INVPCID, "INVPCID" } #ifdef __KERNEL__ -- cgit v0.10.2 From 5037878e2223278aa627162aa0bf106dffac19d4 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 4 Feb 2013 16:00:28 +0200 Subject: KVM: VMX: cleanup vmx_set_cr0(). When calculating hw_cr0 the current code masks bits that should be always on and re-adds them back immediately after. Clean up the code by masking only those bits that should be dropped from hw_cr0. This allows us to get rid of some defines.
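For illustration, a small stand-alone C check of the bit-mask identity the cleanup relies on: bits that are unconditionally OR-ed back in do not need to be cleared first, so only the bits that really have to be dropped need to appear in the mask. The mask values below are placeholders, not the real CR0 definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t ALWAYS_ON = 0x00010020ULL;	/* placeholder "always on" bits */
	const uint64_t DROP      = 0x60000000ULL;	/* placeholder bits to drop     */
	uint64_t cr0;

	for (cr0 = 0; cr0 < (1ULL << 32); cr0 += 0x01010101ULL) {
		uint64_t old_way = (cr0 & ~(DROP | ALWAYS_ON)) | ALWAYS_ON;
		uint64_t new_way = (cr0 & ~DROP) | ALWAYS_ON;

		assert(old_way == new_way);
	}
	printf("masking only the dropped bits yields the same hw_cr0\n");
	return 0;
}

The same identity is what lets the common prefix be hoisted in front of the unrestricted/non-unrestricted branches in the diff below.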
Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fe9a9cf..fe09fdc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -95,12 +95,8 @@ module_param(enable_apicv_reg_vid, bool, S_IRUGO); static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); -#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ - (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) -#define KVM_GUEST_CR0_MASK \ - (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) -#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ - (X86_CR0_WP | X86_CR0_NE) +#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) +#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ @@ -3137,11 +3133,11 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; + hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); if (enable_unrestricted_guest) - hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST) - | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; + hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { - hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON; + hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); -- cgit v0.10.2 From caf6900f2d8aaebe404c976753f6813ccd31d95e Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 5 Feb 2013 15:11:09 +0800 Subject: KVM: MMU: lazily drop large spte Currently, kvm zaps the large spte if write-protected is needed, the later read can fault on that spte. Actually, we can make the large spte readonly instead of making them not present, the page fault caused by read access can be avoided The idea is from Avi: | As I mentioned before, write-protecting a large spte is a good idea, | since it moves some work from protect-time to fault-time, so it reduces | jitter. This removes the need for the return value. Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0242a8a..2a8d99a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1106,8 +1106,7 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) /* * Write-protect on the specified @sptep, @pt_protect indicates whether - * spte writ-protection is caused by protecting shadow page table. - * @flush indicates whether tlb need be flushed. + * spte write-protection is caused by protecting shadow page table. * * Note: write protection is difference between drity logging and spte * protection: @@ -1116,10 +1115,9 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) * - for spte protection, the spte can be writable only after unsync-ing * shadow page. * - * Return true if the spte is dropped. + * Return true if tlb need be flushed. 
*/ -static bool -spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect) +static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) { u64 spte = *sptep; @@ -1129,17 +1127,11 @@ spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect) rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); - if (__drop_large_spte(kvm, sptep)) { - *flush |= true; - return true; - } - if (pt_protect) spte &= ~SPTE_MMU_WRITEABLE; spte = spte & ~PT_WRITABLE_MASK; - *flush |= mmu_spte_update(sptep, spte); - return false; + return mmu_spte_update(sptep, spte); } static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, @@ -1151,11 +1143,8 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { BUG_ON(!(*sptep & PT_PRESENT_MASK)); - if (spte_write_protect(kvm, sptep, &flush, pt_protect)) { - sptep = rmap_get_first(*rmapp, &iter); - continue; - } + flush |= spte_write_protect(kvm, sptep, pt_protect); sptep = rmap_get_next(&iter); } @@ -2596,6 +2585,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, break; } + drop_large_spte(vcpu, iterator.sptep); + if (!is_shadow_present_pte(*iterator.sptep)) { u64 base_addr = iterator.addr; -- cgit v0.10.2 From 55dd98c3a81e243447d5fbf496fa8b909ef3b7f6 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 5 Feb 2013 15:26:54 +0800 Subject: KVM: MMU: cleanup mapping-level Use min() to cleanup mapping_level Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2a8d99a..5356d8d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -832,8 +832,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) if (host_level == PT_PAGE_TABLE_LEVEL) return host_level; - max_level = kvm_x86_ops->get_lpage_level() < host_level ? 
- kvm_x86_ops->get_lpage_level() : host_level; + max_level = min(kvm_x86_ops->get_lpage_level(), host_level); for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) -- cgit v0.10.2 From f761620377ebc791afba7ded078947d2116f48ce Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 5 Feb 2013 15:27:27 +0800 Subject: KVM: MMU: remove pt_access in mmu_set_spte It is only used in debug code, so drop it Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5356d8d..e956e9b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2388,16 +2388,15 @@ done: } static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - unsigned pt_access, unsigned pte_access, - int write_fault, int *emulate, int level, gfn_t gfn, - pfn_t pfn, bool speculative, bool host_writable) + unsigned pte_access, int write_fault, int *emulate, + int level, gfn_t gfn, pfn_t pfn, bool speculative, + bool host_writable) { int was_rmapped = 0; int rmap_count; - pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n", - __func__, *sptep, pt_access, - write_fault, gfn); + pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, + *sptep, write_fault, gfn); if (is_rmap_spte(*sptep)) { /* @@ -2513,7 +2512,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, return -1; for (i = 0; i < ret; i++, gfn++, start++) - mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL, + mmu_set_spte(vcpu, start, access, 0, NULL, sp->role.level, gfn, page_to_pfn(pages[i]), true, true); @@ -2574,9 +2573,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { if (iterator.level == level) { - unsigned pte_access = ACC_ALL; - - mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, + mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, write, &emulate, level, gfn, pfn, prefault, map_writable); direct_pte_prefetch(vcpu, iterator.sptep); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 34c5c99..105dd5b 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -326,8 +326,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * we call mmu_set_spte() with host_writable = true because * pte_prefetch_gfn_to_pfn always gets a writable pfn. 
*/ - mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, - NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true); + mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL, + gfn, pfn, true, true); return true; } @@ -470,9 +470,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, } clear_sp_write_flooding_count(it.sptep); - mmu_set_spte(vcpu, it.sptep, access, gw->pte_access, - write_fault, &emulate, it.level, - gw->gfn, pfn, prefault, map_writable); + mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate, + it.level, gw->gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); return emulate; -- cgit v0.10.2 From 24db2734ad8123b6858ca98d690483ecdcceebb5 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 5 Feb 2013 15:28:02 +0800 Subject: KVM: MMU: cleanup __direct_map Use link_shadow_page to link the sp to the spte in __direct_map Reviewed-by: Gleb Natapov Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e956e9b..1cda1f3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1947,9 +1947,9 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) { u64 spte; - spte = __pa(sp->spt) - | PT_PRESENT_MASK | PT_ACCESSED_MASK - | PT_WRITABLE_MASK | PT_USER_MASK; + spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | + shadow_user_mask | shadow_x_mask | shadow_accessed_mask; + mmu_spte_set(sptep, spte); } @@ -2592,11 +2592,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, iterator.level - 1, 1, ACC_ALL, iterator.sptep); - mmu_spte_set(iterator.sptep, - __pa(sp->spt) - | PT_PRESENT_MASK | PT_WRITABLE_MASK - | shadow_user_mask | shadow_x_mask - | shadow_accessed_mask); + link_shadow_page(iterator.sptep, sp); } } return emulate; -- cgit v0.10.2 From 79fd50c67f91136add9726fb7719b57a66c6f763 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 7 Feb 2013 13:20:52 +0100 Subject: KVM: s390: Fix handling of iscs. There are two ways to express an interruption subclass: - As a bitmask, as used in cr6. - As a number, as used in the I/O interruption word. Unfortunately, we have treated the I/O interruption word as if it contained the bitmask as well, which went unnoticed so far as - (not-yet-released) qemu made the same mistake, and - Linux guest kernels don't check the isc value in the I/O interruption word for subchannel interrupts. Make sure that we treat the I/O interruption word correctly. 
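As a stand-alone illustration of the conversion this introduces (mirroring int_word_to_isc_bits() in the diff below, but written as ordinary user-space C): the ISC number is carried in the three bits selected by 0x38000000 of the I/O interruption word, while cr6 wants that number turned into a single mask bit, ISC 0 being the most significant of the eight mask bits.

#include <stdint.h>
#include <stdio.h>

static uint64_t int_word_to_isc_bits(uint32_t int_word)
{
	uint8_t isc = (int_word & 0x38000000) >> 27;	/* ISC as a number, 0..7 */

	return (uint64_t)(0x80 >> isc) << 24;		/* ISC as a cr6-style mask bit */
}

int main(void)
{
	int isc;

	for (isc = 0; isc < 8; isc++) {
		uint32_t int_word = (uint32_t)isc << 27;	/* only the ISC field populated */

		printf("isc %d -> cr6 bit 0x%08llx\n", isc,
		       (unsigned long long)int_word_to_isc_bits(int_word));
	}
	return 0;
}

The old code compared cr6 against the raw interruption word, i.e. it mixed the two representations.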
Reviewed-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Gleb Natapov diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 9a12835..2f6ccb0 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -55,6 +55,13 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) return 1; } +static u64 int_word_to_isc_bits(u32 int_word) +{ + u8 isc = (int_word & 0x38000000) >> 27; + + return (0x80 >> isc) << 24; +} + static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt_info *inti) { @@ -96,7 +103,8 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: if (psw_ioint_disabled(vcpu)) return 0; - if (vcpu->arch.sie_block->gcr[6] & inti->io.io_int_word) + if (vcpu->arch.sie_block->gcr[6] & + int_word_to_isc_bits(inti->io.io_int_word)) return 1; return 0; default: @@ -724,7 +732,8 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, list_for_each_entry(iter, &fi->list, list) { if (!is_ioint(iter->type)) continue; - if (cr6 && ((cr6 & iter->io.io_int_word) == 0)) + if (cr6 && + ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) continue; if (schid) { if (((schid & 0x00000000ffff0000) >> 16) != @@ -811,11 +820,14 @@ int kvm_s390_inject_vm(struct kvm *kvm, if (!is_ioint(inti->type)) list_add_tail(&inti->list, &fi->list); else { + u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); + /* Keep I/O interrupts sorted in isc order. */ list_for_each_entry(iter, &fi->list, list) { if (!is_ioint(iter->type)) continue; - if (iter->io.io_int_word <= inti->io.io_int_word) + if (int_word_to_isc_bits(iter->io.io_int_word) + <= isc_bits) continue; break; } -- cgit v0.10.2 From 257090f70233084488f7b3ebe99be8c159a23281 Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Sun, 10 Feb 2013 22:57:18 +0800 Subject: KVM: VMX: disable apicv by default Without Posted Interrupt, current code is broken. Just disable by default until Posted Interrupt is ready. Signed-off-by: Yang Zhang Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fe09fdc..c794478 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -84,8 +84,7 @@ module_param(vmm_exclusive, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); -static bool __read_mostly enable_apicv_reg_vid = 1; -module_param(enable_apicv_reg_vid, bool, S_IRUGO); +static bool __read_mostly enable_apicv_reg_vid; /* * If nested=1, nested virtualization is supported, i.e., guests may use -- cgit v0.10.2 From 7a905b1485adf863607b5fc9e32a3fa3838bcc23 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Thu, 7 Feb 2013 18:55:57 +0900 Subject: KVM: Remove user_alloc from struct kvm_memory_slot This field was needed to differentiate memory slots created by the new API, KVM_SET_USER_MEMORY_REGION, from those by the old equivalent, KVM_SET_MEMORY_REGION, whose support was dropped long before: commit b74a07beed0e64bfba413dcb70dd6749c57f43dc KVM: Remove kernel-allocated memory regions Although we also have private memory slots to which KVM allocates memory with vm_mmap(), !user_alloc slots in other words, the slot id should be enough for differentiating them. Note: corresponding function parameters will be removed later. 
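A minimal sketch of the id-based test that replaces the flag (the constant below is a placeholder for illustration; the real value comes from the KVM headers): slots created via KVM_SET_USER_MEMORY_REGION have ids below KVM_USER_MEM_SLOTS, while KVM's internal slots, the ones backed by vm_mmap(), sit at or above it.

#include <stdbool.h>
#include <stdio.h>

#define KVM_USER_MEM_SLOTS 125	/* placeholder value for illustration */

/* Internal (private) slots are the ones KVM itself backs with vm_mmap(). */
static bool is_private_slot(int id)
{
	return id >= KVM_USER_MEM_SLOTS;
}

int main(void)
{
	printf("slot 3 private? %d\n", is_private_slot(3));	/* userspace-created slot */
	printf("slot 126 private? %d\n", is_private_slot(126));	/* internal slot */
	return 0;
}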
Reviewed-by: Marcelo Tosatti Signed-off-by: Takuya Yoshikawa Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 373e17a..3c5bb6f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6897,33 +6897,28 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, bool user_alloc) { int npages = memslot->npages; - int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; - /* Prevent internal slot pages from being moved by fork()/COW. */ - if (memslot->id >= KVM_USER_MEM_SLOTS) - map_flags = MAP_SHARED | MAP_ANONYMOUS; - - /*To keep backward compatibility with older userspace, - *x86 needs to handle !user_alloc case. + /* + * Only private memory slots need to be mapped here since + * KVM_SET_MEMORY_REGION ioctl is no longer supported. */ - if (!user_alloc) { - if (npages && !old.npages) { - unsigned long userspace_addr; + if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) { + unsigned long userspace_addr; - userspace_addr = vm_mmap(NULL, 0, - npages * PAGE_SIZE, - PROT_READ | PROT_WRITE, - map_flags, - 0); + /* + * MAP_SHARED to prevent internal slot pages from being moved + * by fork()/COW. + */ + userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, 0); - if (IS_ERR((void *)userspace_addr)) - return PTR_ERR((void *)userspace_addr); + if (IS_ERR((void *)userspace_addr)) + return PTR_ERR((void *)userspace_addr); - memslot->userspace_addr = userspace_addr; - } + memslot->userspace_addr = userspace_addr; } - return 0; } @@ -6935,7 +6930,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT; - if (!user_alloc && !old.user_alloc && old.npages && !npages) { + if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) { int ret; ret = vm_munmap(old.userspace_addr, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0350e0d..722cae7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -273,7 +273,6 @@ struct kvm_memory_slot { unsigned long userspace_addr; u32 flags; short id; - bool user_alloc; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2e93630..adc68fe 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -839,7 +839,6 @@ int __kvm_set_memory_region(struct kvm *kvm, r = -ENOMEM; if (change == KVM_MR_CREATE) { - new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; if (kvm_arch_create_memslot(&new, npages)) -- cgit v0.10.2 From ffe129ecd79779221fdb03305049ec8b5a8beb0f Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Tue, 15 Jan 2013 22:20:42 +0000 Subject: KVM: PPC: booke: use vcpu reference from thread_struct Like other places, use thread_struct to get vcpu reference. 
Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 97d3727..11ae3d8 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -919,8 +919,6 @@ #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9 #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9 #endif -#define SPRN_SPRG_RVCPU SPRN_SPRG1 -#define SPRN_SPRG_WVCPU SPRN_SPRG1 #endif #ifdef CONFIG_8xx diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 4e23ba2..46f6afd 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -117,7 +117,7 @@ int main(void) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); #endif -#ifdef CONFIG_KVM_BOOKE_HV +#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); #endif diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index bb46b32..ca16d57 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -56,7 +56,8 @@ _GLOBAL(kvmppc_handler_\ivor_nr) /* Get pointer to vcpu and record exit number. */ mtspr \scratch , r4 - mfspr r4, SPRN_SPRG_RVCPU + mfspr r4, SPRN_SPRG_THREAD + lwz r4, THREAD_KVM_VCPU(r4) stw r3, VCPU_GPR(R3)(r4) stw r5, VCPU_GPR(R5)(r4) stw r6, VCPU_GPR(R6)(r4) @@ -402,9 +403,6 @@ lightweight_exit: lwz r8, kvmppc_booke_handlers@l(r8) mtspr SPRN_IVPR, r8 - /* Save vcpu pointer for the exception handlers. */ - mtspr SPRN_SPRG_WVCPU, r4 - lwz r5, VCPU_SHARED(r4) /* Can't switch the stack pointer until after IVPR is switched, -- cgit v0.10.2 From 1d542d9c2bbca9b99835fef6a938b9ae9dd7ca2a Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Tue, 15 Jan 2013 22:24:39 +0000 Subject: KVM: PPC: booke: Allow multiple exception types Current kvmppc_booke_handlers uses the same macro (KVM_HANDLER) and all handlers are considered to be the same size. This will not be the case if we want to use different macros for different handlers. This patch improves the kvmppc_booke_handler so that it can support different macros for different handlers. Signed-off-by: Liu Yu [bharat.bhushan@freescale.com: Substantial changes] Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 493630e..44a657a 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -49,8 +49,6 @@ enum emulation_result { extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); -extern char kvmppc_handlers_start[]; -extern unsigned long kvmppc_handler_len; extern void kvmppc_handler_highmem(void); extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 8779cd4c..d2f502d 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1594,7 +1594,9 @@ int __init kvmppc_booke_init(void) { #ifndef CONFIG_KVM_BOOKE_HV unsigned long ivor[16]; + unsigned long *handler = kvmppc_booke_handler_addr; unsigned long max_ivor = 0; + unsigned long handler_len; int i; /* We install our own exception handlers by hijacking IVPR. 
IVPR must @@ -1627,14 +1629,16 @@ int __init kvmppc_booke_init(void) for (i = 0; i < 16; i++) { if (ivor[i] > max_ivor) - max_ivor = ivor[i]; + max_ivor = i; + handler_len = handler[i + 1] - handler[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], - kvmppc_handlers_start + i * kvmppc_handler_len, - kvmppc_handler_len); + (void *)handler[i], handler_len); } - flush_icache_range(kvmppc_booke_handlers, - kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); + + handler_len = handler[max_ivor + 1] - handler[max_ivor]; + flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + + ivor[max_ivor] + handler_len); #endif /* !BOOKE_HV */ return 0; } diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index e9b88e4..5fd1ba6 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -65,6 +65,7 @@ (1 << BOOKE_IRQPRIO_CRITICAL)) extern unsigned long kvmppc_booke_handlers; +extern unsigned long kvmppc_booke_handler_addr[]; void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr); diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index ca16d57..eae8483 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -74,6 +74,14 @@ _GLOBAL(kvmppc_handler_\ivor_nr) bctr .endm +.macro KVM_HANDLER_ADDR ivor_nr + .long kvmppc_handler_\ivor_nr +.endm + +.macro KVM_HANDLER_END + .long kvmppc_handlers_end +.endm + _GLOBAL(kvmppc_handlers_start) KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0 @@ -94,9 +102,7 @@ KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0 - -_GLOBAL(kvmppc_handler_len) - .long kvmppc_handler_1 - kvmppc_handler_0 +_GLOBAL(kvmppc_handlers_end) /* Registers: * SPRG_SCRATCH0: guest r4 @@ -461,6 +467,31 @@ lightweight_exit: lwz r4, VCPU_GPR(R4)(r4) rfi + .data + .align 4 + .globl kvmppc_booke_handler_addr +kvmppc_booke_handler_addr: +KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE +KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE +KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT +KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM +KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER +KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT +KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS +KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS +KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA +KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND +KVM_HANDLER_END /*Always keep this in end*/ + #ifdef CONFIG_SPE _GLOBAL(kvmppc_save_guest_spe) cmpi 0,r3,0 diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index b479ed7..6dd4de7 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -491,6 +491,9 @@ static int __init kvmppc_e500_init(void) { int r, i; unsigned long ivor[3]; + /* Process remaining handlers above the generic first 16 */ + unsigned long *handler = 
&kvmppc_booke_handler_addr[16]; + unsigned long handler_len; unsigned long max_ivor = 0; r = kvmppc_core_check_processor_compat(); @@ -506,15 +509,16 @@ static int __init kvmppc_e500_init(void) ivor[1] = mfspr(SPRN_IVOR33); ivor[2] = mfspr(SPRN_IVOR34); for (i = 0; i < 3; i++) { - if (ivor[i] > max_ivor) - max_ivor = ivor[i]; + if (ivor[i] > ivor[max_ivor]) + max_ivor = i; + handler_len = handler[i + 1] - handler[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], - kvmppc_handlers_start + (i + 16) * kvmppc_handler_len, - kvmppc_handler_len); + (void *)handler[i], handler_len); } - flush_icache_range(kvmppc_booke_handlers, - kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); + handler_len = handler[max_ivor + 1] - handler[max_ivor]; + flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + + ivor[max_ivor] + handler_len); return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); } -- cgit v0.10.2 From ee53e560a8f52cbc1fba877fdf4acffb0c163f29 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Tue, 15 Jan 2013 22:24:43 +0000 Subject: booke: Added DBCR4 SPR number Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index e07e6af..b417de3 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -56,6 +56,7 @@ #define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ #define SPRN_EPCR 0x133 /* Embedded Processor Control Register */ #define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ +#define SPRN_DBCR4 0x233 /* Debug Control Register 4 */ #define SPRN_MSRP 0x137 /* MSR Protect Register */ #define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ #define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ -- cgit v0.10.2 From 011da8996263f799a469a761ee15c998d7ef1acb Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 31 Jan 2013 14:17:38 +0100 Subject: KVM: PPC: BookE: Handle alignment interrupts When the guest triggers an alignment interrupt, we don't handle it properly today and instead BUG_ON(). This really shouldn't happen. Instead, we should just pass the interrupt back into the guest so it can deal with it. 
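A rough user-space sketch of the pattern this switches to, with all names made up for illustration (they are not the kernel's): instead of treating the exit as fatal, the handler records the faulting address and syndrome and queues the alignment interrupt priority so the fault is delivered back to the guest.

#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t queued_dear;		/* faulting data address */
	uint64_t queued_esr;		/* exception syndrome */
	uint32_t pending_irqprio;	/* bitmap of queued guest interrupts */
};

enum { TOY_IRQPRIO_ALIGNMENT = 5 };	/* illustrative priority number */

static void queue_alignment(struct toy_vcpu *vcpu, uint64_t dear, uint64_t esr)
{
	vcpu->queued_dear = dear;
	vcpu->queued_esr = esr;
	vcpu->pending_irqprio |= 1u << TOY_IRQPRIO_ALIGNMENT;
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	/* on an alignment exit, reflect the fault instead of BUG_ON() */
	queue_alignment(&vcpu, 0x1003, 0x00800000);
	printf("pending 0x%x, dear 0x%llx\n", vcpu.pending_irqprio,
	       (unsigned long long)vcpu.queued_dear);
	return 0;
}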
Reported-by: Gao Guanhua-B22826 Tested-by: Gao Guanhua-B22826 Signed-off-by: Alexander Graf diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index d2f502d..020923e 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -182,6 +182,14 @@ static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); } +static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags, + ulong esr_flags) +{ + vcpu->arch.queued_dear = dear_flags; + vcpu->arch.queued_esr = esr_flags; + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT); +} + void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) { vcpu->arch.queued_esr = esr_flags; @@ -345,6 +353,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, switch (priority) { case BOOKE_IRQPRIO_DTLB_MISS: case BOOKE_IRQPRIO_DATA_STORAGE: + case BOOKE_IRQPRIO_ALIGNMENT: update_dear = true; /* fall through */ case BOOKE_IRQPRIO_INST_STORAGE: @@ -358,7 +367,6 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, case BOOKE_IRQPRIO_SPE_FP_DATA: case BOOKE_IRQPRIO_SPE_FP_ROUND: case BOOKE_IRQPRIO_AP_UNAVAIL: - case BOOKE_IRQPRIO_ALIGNMENT: allowed = 1; msr_mask = MSR_CE | MSR_ME | MSR_DE; int_class = INT_CLASS_NONCRIT; @@ -971,6 +979,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; + case BOOKE_INTERRUPT_ALIGNMENT: + kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear, + vcpu->arch.fault_esr); + r = RESUME_GUEST; + break; + #ifdef CONFIG_KVM_BOOKE_HV case BOOKE_INTERRUPT_HV_SYSCALL: if (!(vcpu->arch.shared->msr & MSR_PR)) { diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index eae8483..f4bb55c 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -45,12 +45,14 @@ (1< Date: Wed, 13 Feb 2013 17:50:39 +0200 Subject: x86 emulator: fix parity calculation for AAD instruction Reported-by: Paolo Bonzini Suggested-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2b11318..a335cc6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2995,14 +2995,11 @@ static int em_aad(struct x86_emulate_ctxt *ctxt) ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; - ctxt->eflags &= ~(X86_EFLAGS_PF | X86_EFLAGS_SF | X86_EFLAGS_ZF); - - if (!al) - ctxt->eflags |= X86_EFLAGS_ZF; - if (!(al & 1)) - ctxt->eflags |= X86_EFLAGS_PF; - if (al & 0x80) - ctxt->eflags |= X86_EFLAGS_SF; + /* Set PF, ZF, SF */ + ctxt->src.type = OP_IMM; + ctxt->src.val = 0; + ctxt->src.bytes = 1; + fastop(ctxt, em_or); return X86EMUL_CONTINUE; } -- cgit v0.10.2 From cbd29cb6e38af6119df2cdac0c58acf0e85c177e Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 11 Feb 2013 12:19:28 +0100 Subject: KVM: nVMX: Remove redundant get_vmcs12 from nested_vmx_exit_handled_msr We already pass vmcs12 as argument. 
Signed-off-by: Jan Kiszka Signed-off-by: Gleb Natapov diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c794478..6667042 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5920,7 +5920,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; gpa_t bitmap; - if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS)) + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return 1; /* -- cgit v0.10.2 From ed55705dd5008b408c48a8459b8b34b01f3de985 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 18 Feb 2013 22:58:14 -0300 Subject: x86: pvclock kvm: align allocation size to page size To match whats mapped via vsyscalls to userspace. Reported-by: Peter Hurley Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 220a360..5bedbdd 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -218,6 +218,9 @@ static void kvm_shutdown(void) void __init kvmclock_init(void) { unsigned long mem; + int size; + + size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); if (!kvm_para_available()) return; @@ -231,16 +234,14 @@ void __init kvmclock_init(void) printk(KERN_INFO "kvm-clock: Using msrs %x and %x", msr_kvm_system_time, msr_kvm_wall_clock); - mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS, - PAGE_SIZE); + mem = memblock_alloc(size, PAGE_SIZE); if (!mem) return; hv_clock = __va(mem); if (kvm_register_clock("boot clock")) { hv_clock = NULL; - memblock_free(mem, - sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); + memblock_free(mem, size); return; } pv_time_ops.sched_clock = kvm_clock_read; @@ -275,7 +276,7 @@ int __init kvm_setup_vsyscall_timeinfo(void) struct pvclock_vcpu_time_info *vcpu_time; unsigned int size; - size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS; + size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); preempt_disable(); cpu = smp_processor_id(); -- cgit v0.10.2 From 6b73a96065e89dc9fa75ba4f78b1aa3a3bbd0470 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 20 Feb 2013 18:52:02 -0300 Subject: Revert "KVM: MMU: lazily drop large spte" This reverts commit caf6900f2d8aaebe404c976753f6813ccd31d95e. It is causing migration failures, reference https://bugzilla.kernel.org/show_bug.cgi?id=54061. Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1cda1f3..4ed3edb 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1105,7 +1105,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) /* * Write-protect on the specified @sptep, @pt_protect indicates whether - * spte write-protection is caused by protecting shadow page table. + * spte writ-protection is caused by protecting shadow page table. + * @flush indicates whether tlb need be flushed. * * Note: write protection is difference between drity logging and spte * protection: @@ -1114,9 +1115,10 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) * - for spte protection, the spte can be writable only after unsync-ing * shadow page. * - * Return true if tlb need be flushed. + * Return true if the spte is dropped. 
*/ -static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) +static bool +spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect) { u64 spte = *sptep; @@ -1126,11 +1128,17 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); + if (__drop_large_spte(kvm, sptep)) { + *flush |= true; + return true; + } + if (pt_protect) spte &= ~SPTE_MMU_WRITEABLE; spte = spte & ~PT_WRITABLE_MASK; - return mmu_spte_update(sptep, spte); + *flush |= mmu_spte_update(sptep, spte); + return false; } static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, @@ -1142,8 +1150,11 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { BUG_ON(!(*sptep & PT_PRESENT_MASK)); + if (spte_write_protect(kvm, sptep, &flush, pt_protect)) { + sptep = rmap_get_first(*rmapp, &iter); + continue; + } - flush |= spte_write_protect(kvm, sptep, pt_protect); sptep = rmap_get_next(&iter); } @@ -2581,8 +2592,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, break; } - drop_large_spte(vcpu, iterator.sptep); - if (!is_shadow_present_pte(*iterator.sptep)) { u64 base_addr = iterator.addr; -- cgit v0.10.2