author    | Marcelo Tosatti <mtosatti@redhat.com> | 2009-06-04 18:08:22 (GMT)
committer | Avi Kivity <avi@redhat.com> | 2009-09-10 05:32:49 (GMT)
commit    | 64a2268dcfc9c3626aa7f70902690e2fc10c1630 (patch)
tree      | 51e37e86dc9a75f10708ff4df7614c63dfa7c079
parent    | 9f4cc12765ea48a40347449d6802a3322ced8709 (diff)
download  | linux-fsl-qoriq-64a2268dcfc9c3626aa7f70902690e2fc10c1630.tar.xz
KVM: move coalesced_mmio locking to its own device
Move coalesced_mmio locking to its own device, instead of relying on
kvm->lock.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
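
For illustration, the locking model the patch moves to (a per-device lock that serializes writers into that device's ring, with no global lock held around the check-and-insert) can be sketched in plain userspace C as follows. This is only a sketch under assumed names: demo_dev, demo_ring, demo_write and DEMO_RING_MAX are illustrative stand-ins, not KVM's real structures or API.

/* Userspace sketch only, not kernel code: each device owns a spinlock
 * that protects its coalescing ring, mirroring the dev->lock added by
 * this patch. One slot is always left unused to tell full from empty. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define DEMO_RING_MAX 8                 /* stand-in for KVM_COALESCED_MMIO_MAX */
#define DEMO_DATA_LEN 8

struct demo_ring {
        unsigned int first;             /* oldest entry, consumed by the reader */
        unsigned int last;              /* first free slot, filled by writers   */
        char data[DEMO_RING_MAX][DEMO_DATA_LEN];
};

struct demo_dev {
        pthread_spinlock_t lock;        /* per-device lock, analogue of dev->lock */
        struct demo_ring ring;
};

/* Append one entry; the device's own lock alone serializes writers,
 * so no VM-wide lock is needed around the check and the insert. */
static int demo_write(struct demo_dev *dev, const void *val, size_t len)
{
        struct demo_ring *ring = &dev->ring;
        unsigned int avail;

        pthread_spin_lock(&dev->lock);
        avail = (ring->first - ring->last - 1) % DEMO_RING_MAX;
        if (avail < 1) {                /* ring full, caller must fall back */
                pthread_spin_unlock(&dev->lock);
                return -1;
        }
        memcpy(ring->data[ring->last], val,
               len < DEMO_DATA_LEN ? len : DEMO_DATA_LEN);
        ring->last = (ring->last + 1) % DEMO_RING_MAX;
        pthread_spin_unlock(&dev->lock);
        return 0;
}

int main(void)
{
        struct demo_dev dev = { .ring = { .first = 0, .last = 0 } };

        pthread_spin_init(&dev.lock, PTHREAD_PROCESS_PRIVATE);
        demo_write(&dev, "abcd", 4);
        printf("entries queued: %u\n", dev.ring.last);
        pthread_spin_destroy(&dev.lock);
        return 0;
}

Compile with something like cc -pthread demo.c. The point is only that the lock lives in the device, so two coalesced-MMIO devices never contend with each other and the VM-wide kvm->lock is not involved in queuing an entry.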
-rw-r--r-- | virt/kvm/coalesced_mmio.c | 10
-rw-r--r-- | virt/kvm/coalesced_mmio.h |  1
2 files changed, 5 insertions, 6 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 7549068..397f419 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -31,10 +31,6 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
         if (!is_write)
                 return 0;
 
-        /* kvm->lock is taken by the caller and must be not released before
-         * dev.read/write
-         */
-
         /* Are we able to batch it ? */
 
         /* last is the first free entry
@@ -43,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
          */
         ring = dev->kvm->coalesced_mmio_ring;
         avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-        if (avail < 1) {
+        if (avail < KVM_MAX_VCPUS) {
                 /* full */
                 return 0;
         }
@@ -70,7 +66,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
         struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
         struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
 
-        /* kvm->lock must be taken by caller before call to in_range()*/
+        spin_lock(&dev->lock);
 
         /* copy data in first free entry of the ring */
 
@@ -79,6 +75,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
         memcpy(ring->coalesced_mmio[ring->last].data, val, len);
         smp_wmb();
         ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+        spin_unlock(&dev->lock);
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
@@ -101,6 +98,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
         dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
         if (!dev)
                 return -ENOMEM;
+        spin_lock_init(&dev->lock);
         kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
         dev->kvm = kvm;
         kvm->coalesced_mmio_dev = dev;
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 5ac0ec6..4b49f27 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,6 +12,7 @@
 struct kvm_coalesced_mmio_dev {
         struct kvm_io_device dev;
         struct kvm *kvm;
+        spinlock_t lock;
         int nb_zones;
         struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
 };
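
One detail worth calling out in the in_range() hunk: the availability test is tightened from avail < 1 to avail < KVM_MAX_VCPUS. With the caller no longer required to hold kvm->lock across the range check and the write, several vcpus can pass the check before any of them takes dev->lock, and the extra headroom keeps the ring from overflowing in that case. A rough sketch of that reservation rule, reusing the illustrative demo_ring and DEMO_RING_MAX from the example above (NR_WRITERS is a made-up stand-in for KVM_MAX_VCPUS):

/* Hypothetical helper, not KVM code: report room only if every writer
 * that might have raced past this check can still fit in the ring. */
#define NR_WRITERS 4                    /* stand-in for KVM_MAX_VCPUS */

static int demo_has_room(const struct demo_ring *ring)
{
        unsigned int avail = (ring->first - ring->last - 1) % DEMO_RING_MAX;

        return avail >= NR_WRITERS;     /* treat "nearly full" as full */
}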