author     Alexander Graf <agraf@suse.de>    2013-02-13 11:56:14 (GMT)
committer  Alexander Graf <agraf@suse.de>    2013-02-13 11:56:14 (GMT)
commit     dd92d6f2749c43ebab91c4762a1bc79e6523e936 (patch)
tree       6e6730bdd09284679c0861df6d0fcbec08ea7a87 /virt
parent     b9e3e208935e95ad62bd1b1bc4408c23a9ae3ada (diff)
parent     b0da5bec30eca7ffbb2c89afa6fe503fd418d3a6 (diff)
download   linux-fsl-qoriq-dd92d6f2749c43ebab91c4762a1bc79e6523e936.tar.xz

Merge commit 'origin/next' into kvm-ppc-next
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/ioapic.c     39
-rw-r--r--   virt/kvm/ioapic.h      4
-rw-r--r--   virt/kvm/iommu.c       4
-rw-r--r--   virt/kvm/irq_comm.c   25
-rw-r--r--   virt/kvm/kvm_main.c  106
5 files changed, 146 insertions, 32 deletions
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index f3abbef..ce82b94 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -35,6 +35,7 @@
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
smp_wmb();
}
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+ u64 *eoi_exit_bitmap)
+{
+ struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+ union kvm_ioapic_redirect_entry *e;
+ struct kvm_lapic_irq irqe;
+ int index;
+
+ spin_lock(&ioapic->lock);
+ /* traverse ioapic entries to set the eoi exit bitmap */
+ for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+ e = &ioapic->redirtbl[index];
+ if (!e->fields.mask &&
+ (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+ kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+ index))) {
+ irqe.dest_id = e->fields.dest_id;
+ irqe.vector = e->fields.vector;
+ irqe.dest_mode = e->fields.dest_mode;
+ irqe.delivery_mode = e->fields.delivery_mode << 8;
+ kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+ }
+ }
+ spin_unlock(&ioapic->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
+
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+ return;
+ kvm_make_update_eoibitmap_request(kvm);
+}
+
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
@@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
&& ioapic->irr & (1 << index))
ioapic_service(ioapic, index);
+ kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
break;
}
}
@@ -455,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
update_handled_vectors(ioapic);
+ kvm_ioapic_make_eoibitmap_request(kvm);
spin_unlock(&ioapic->lock);
return 0;
}
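
For orientation while reading kvm_ioapic_calculate_eoi_exitmap() above: the redirection-table entries it walks have the layout below, quoted for reference from virt/kvm/ioapic.h of this period (it is not part of this diff). The delivery_mode << 8 conversion shifts the IOAPIC's 3-bit field into the APIC_DM_* position (ICR bits 8-10) that struct kvm_lapic_irq expects.

/* From virt/kvm/ioapic.h (3.8-era).  The new loop reads fields.mask,
 * .trig_mode, .dest_id, .vector, .dest_mode and .delivery_mode. */
union kvm_ioapic_redirect_entry {
	u64 bits;
	struct {
		u8 vector;
		u8 delivery_mode:3;
		u8 dest_mode:1;
		u8 delivery_status:1;
		u8 polarity:1;
		u8 remote_irr:1;
		u8 trig_mode:1;
		u8 mask:1;
		u8 reserve:7;
		u8 reserved[4];
		u8 dest_id;
	} fields;
};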
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index a30abfe..0400a46 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
+void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
+ u64 *eoi_exit_bitmap);
+
#endif
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 4a340cb..72a130b 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -76,7 +76,9 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
- flags = IOMMU_READ | IOMMU_WRITE;
+ flags = IOMMU_READ;
+ if (!(slot->flags & KVM_MEM_READONLY))
+ flags |= IOMMU_WRITE;
if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
flags |= IOMMU_CACHE;
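
The iommu.c hunk keys off KVM_MEM_READONLY, which userspace sets when registering a slot via KVM_SET_USER_MEMORY_REGION. A minimal userspace sketch follows; vm_fd and backing are hypothetical names and the addresses are made up.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical example: a slot registered read-only.  With the change
 * above, kvm_iommu_map_pages() maps such a slot without IOMMU_WRITE. */
static int register_readonly_slot(int vm_fd, void *backing)
{
	struct kvm_userspace_memory_region region = {
		.slot            = 1,
		.flags           = KVM_MEM_READONLY,
		.guest_phys_addr = 0xfffc0000,	/* e.g. a ROM window */
		.memory_size     = 0x40000,
		.userspace_addr  = (__u64)(unsigned long)backing,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}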
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 656fa45..ff6d40e 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <trace/events/kvm.h>
#include <asm/msidef.h>
@@ -237,6 +238,28 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
return ret;
}
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ struct kvm_irq_ack_notifier *kian;
+ struct hlist_node *n;
+ int gsi;
+
+ rcu_read_lock();
+ gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+ if (gsi != -1)
+ hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+ link)
+ if (kian->gsi == gsi) {
+ rcu_read_unlock();
+ return true;
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
struct kvm_irq_ack_notifier *kian;
@@ -261,6 +284,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
+ kvm_ioapic_make_eoibitmap_request(kvm);
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -270,6 +294,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_rcu();
+ kvm_ioapic_make_eoibitmap_request(kvm);
}
int kvm_request_irq_source_id(struct kvm *kvm)
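
kvm_irq_has_notifier() reports whether an ack notifier is registered for an irqchip pin, and the two hooks added above keep the EOI exit bitmap current as notifiers come and go. A sketch of the in-kernel registration pattern involved; my_irq_acked and my_attach_gsi are illustrative names, not taken from this diff.

/* Illustrative ack-notifier consumer (device-assignment style).
 * Registering/unregistering now also triggers
 * kvm_ioapic_make_eoibitmap_request(). */
static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
{
	/* guest signalled EOI for kian->gsi: e.g. re-enable the host IRQ */
}

static void my_attach_gsi(struct kvm *kvm,
			  struct kvm_irq_ack_notifier *kian, unsigned gsi)
{
	kian->gsi = gsi;
	kian->irq_acked = my_irq_acked;
	kvm_register_irq_ack_notifier(kvm, kian);
}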
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5e709eb..2e93630 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+ make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
struct page *page;
@@ -714,6 +719,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
}
/*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ * -- move it in the guest physical memory space
+ * -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+ KVM_MR_CREATE,
+ KVM_MR_DELETE,
+ KVM_MR_MOVE,
+ KVM_MR_FLAGS_ONLY,
+};
+
+/*
* Allocate some memory and give it an address in the guest physical address
* space.
*
@@ -731,6 +754,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_memory_slot *slot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots = NULL, *old_memslots;
+ enum kvm_mr_change change;
r = check_memory_region_flags(mem);
if (r)
@@ -772,17 +796,31 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.npages = npages;
new.flags = mem->flags;
- /*
- * Disallow changing a memory slot's size or changing anything about
- * zero sized slots that doesn't involve making them non-zero.
- */
r = -EINVAL;
- if (npages && old.npages && npages != old.npages)
- goto out;
- if (!npages && !old.npages)
+ if (npages) {
+ if (!old.npages)
+ change = KVM_MR_CREATE;
+ else { /* Modify an existing slot. */
+ if ((mem->userspace_addr != old.userspace_addr) ||
+ (npages != old.npages) ||
+ ((new.flags ^ old.flags) & KVM_MEM_READONLY))
+ goto out;
+
+ if (base_gfn != old.base_gfn)
+ change = KVM_MR_MOVE;
+ else if (new.flags != old.flags)
+ change = KVM_MR_FLAGS_ONLY;
+ else { /* Nothing to change. */
+ r = 0;
+ goto out;
+ }
+ }
+ } else if (old.npages) {
+ change = KVM_MR_DELETE;
+ } else /* Modify a non-existent slot: disallowed. */
goto out;
- if ((npages && !old.npages) || (base_gfn != old.base_gfn)) {
+ if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
/* Check for overlaps */
r = -EEXIST;
kvm_for_each_memslot(slot, kvm->memslots) {
@@ -800,20 +838,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.dirty_bitmap = NULL;
r = -ENOMEM;
-
- /*
- * Allocate if a slot is being created. If modifying a slot,
- * the userspace_addr cannot change.
- */
- if (!old.npages) {
+ if (change == KVM_MR_CREATE) {
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
if (kvm_arch_create_memslot(&new, npages))
goto out_free;
- } else if (npages && mem->userspace_addr != old.userspace_addr) {
- r = -EINVAL;
- goto out_free;
}
/* Allocate page dirty bitmap if needed */
@@ -822,7 +852,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- if (!npages || base_gfn != old.base_gfn) {
+ if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
@@ -863,15 +893,23 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- /* map new memory slot into the iommu */
- if (npages) {
+ /*
+ * IOMMU mapping: New slots need to be mapped. Old slots need to be
+ * un-mapped and re-mapped if their base changes. Since base change
+ * unmapping is handled above with slot deletion, mapping alone is
+ * needed here. Anything else the iommu might care about for existing
+ * slots (size changes, userspace addr changes and read-only flag
+ * changes) is disallowed above, so any other attribute changes getting
+ * here can be skipped.
+ */
+ if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_slots;
}
/* actual memory is freed via old in kvm_free_physmem_slot below */
- if (!npages) {
+ if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
@@ -1669,7 +1707,8 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
-bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
struct pid *pid;
struct task_struct *task = NULL;
+ int ret = 0;
rcu_read_lock();
pid = rcu_dereference(target->pid);
@@ -1676,17 +1715,15 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
task = get_pid_task(target->pid, PIDTYPE_PID);
rcu_read_unlock();
if (!task)
- return false;
+ return ret;
if (task->flags & PF_VCPU) {
put_task_struct(task);
- return false;
- }
- if (yield_to(task, 1)) {
- put_task_struct(task);
- return true;
+ return ret;
}
+ ret = yield_to(task, 1);
put_task_struct(task);
- return false;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
@@ -1727,12 +1764,14 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
return eligible;
}
#endif
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
int yielded = 0;
+ int try = 3;
int pass;
int i;
@@ -1744,7 +1783,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
* VCPU is holding the lock that we need and will release it.
* We approximate round-robin by starting at the last boosted VCPU.
*/
- for (pass = 0; pass < 2 && !yielded; pass++) {
+ for (pass = 0; pass < 2 && !yielded && try; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!pass && i <= last_boosted_vcpu) {
i = last_boosted_vcpu;
@@ -1757,10 +1796,15 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
- if (kvm_vcpu_yield_to(vcpu)) {
+
+ yielded = kvm_vcpu_yield_to(vcpu);
+ if (yielded > 0) {
kvm->last_boosted_vcpu = i;
- yielded = 1;
break;
+ } else if (yielded < 0) {
+ try--;
+ if (!try)
+ break;
}
}
}
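
To summarize the memslot refactoring merged above: __kvm_set_memory_region() now classifies each KVM_SET_USER_MEMORY_REGION call as one of the four enum kvm_mr_change values, and rejects size, userspace_addr and KVM_MEM_READONLY changes on an existing slot. A hedged userspace illustration of the four cases (slot number, addresses and the vm_fd/backing names are made up):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void exercise_slot_ops(int vm_fd, void *backing)
{
	struct kvm_userspace_memory_region mr = {
		.slot            = 3,
		.guest_phys_addr = 0x100000000ULL,
		.memory_size     = 1ULL << 30,
		.userspace_addr  = (__u64)(unsigned long)backing,
	};

	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);	/* KVM_MR_CREATE */

	mr.flags |= KVM_MEM_LOG_DIRTY_PAGES;		/* KVM_MR_FLAGS_ONLY */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);

	mr.guest_phys_addr = 0x180000000ULL;		/* KVM_MR_MOVE */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);

	mr.memory_size = 0;				/* KVM_MR_DELETE */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);
}

The kvm_vcpu_on_spin() change follows the same theme of using richer return values: kvm_vcpu_yield_to() now propagates yield_to()'s int result, so a positive return records the boosted vCPU, while a negative return (no eligible task) decrements the try budget of three and can abort the scan early in the undercommitted case.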