Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kvm/Kconfig    |  2 ++
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 12 ++++++------
-rw-r--r--  arch/ia64/kvm/vcpu.h     |  5 +++++
3 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 8e99fed..f833a0b 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	---help---
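
The new dependency exists because KVM's shared device-assignment code calls into the PCI core, which is optional on ia64, so KVM would not build without CONFIG_PCI. As a rough illustration (the helper below is hypothetical, but pci_enable_device(), pci_request_regions() and pci_disable_device() are real PCI-core functions that are only available under CONFIG_PCI):

#include <linux/pci.h>

/* Hypothetical sketch of the kind of PCI-core usage device assignment
 * needs: enable the function and claim its regions before handing it
 * to a guest. */
static int assign_enable_device(struct pci_dev *dev)
{
	int r;

	r = pci_enable_device(dev);	/* only exists when CONFIG_PCI=y */
	if (r)
		return r;

	r = pci_request_regions(dev, "kvm-assigned-device");
	if (r)
		pci_disable_device(dev);

	return r;
}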
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3caac47..af1464f 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -673,16 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vcpu_load(vcpu);
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		vcpu_put(vcpu);
-		return -EAGAIN;
+		r = -EAGAIN;
+		goto out;
 	}
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
@@ -690,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 	}
 	r = __vcpu_run(vcpu, kvm_run);
-
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
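
Two related fixes in kvm_arch_vcpu_ioctl_run(): the temporary signal mask is now swapped in before the KVM_MP_STATE_UNINITIALIZED check, presumably so that it is already in effect while kvm_vcpu_block() sleeps, and the early "return -EAGAIN" becomes "goto out", so the saved mask is restored on that path as well. A minimal userspace sketch of the same save/goto/restore shape (names here are hypothetical, not KVM's):

#include <signal.h>

static int vcpu_ready;			/* stands in for the mp_state check */
static int do_run(void) { return 0; }	/* stands in for __vcpu_run() */

int run_with_sigset(const sigset_t *set)
{
	sigset_t saved;
	int r;

	/* install the temporary mask first, remembering the old one */
	sigprocmask(SIG_SETMASK, set, &saved);

	if (!vcpu_ready) {
		r = -1;		/* early failure... */
		goto out;	/* ...must still restore the mask */
	}

	r = do_run();
out:
	sigprocmask(SIG_SETMASK, &saved, NULL);	/* single restore point */
	return r;
}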
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 341e3fe..e9b2a4e 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define MODE_IND(psr)	\
 	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+#ifndef CONFIG_SMP
+#define _vmm_raw_spin_lock(x) do {}while(0)
+#define _vmm_raw_spin_unlock(x) do {}while(0)
+#else
 #define _vmm_raw_spin_lock(x) \
 	do { \
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
@@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 	do { barrier(); \
 		((spinlock_t *)x)->raw_lock.lock = 0; } \
 	while (0)
+#endif
 void vmm_spin_lock(spinlock_t *lock);
 void vmm_spin_unlock(spinlock_t *lock);
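
The #ifndef CONFIG_SMP stubs appear to be needed because on uniprocessor kernels of this vintage raw_spinlock_t carries no lock word, so the open-coded "((spinlock_t *)x)->raw_lock.lock = 0" in the unlock macro does not compile; with a single CPU there is nothing to spin against, so both operations can safely collapse to no-ops. The same technique in a self-contained userspace form (all names illustrative):

#include <stdatomic.h>

#ifdef DEMO_UP
/* single CPU: the lock word is never contended, compile the ops away */
#define demo_lock(flag)		do { } while (0)
#define demo_unlock(flag)	do { } while (0)
#else
/* SMP: a minimal test-and-set spinlock on a C11 atomic_flag */
#define demo_lock(flag)					\
	do {						\
		while (atomic_flag_test_and_set(flag))	\
			; /* spin until released */	\
	} while (0)
#define demo_unlock(flag)	atomic_flag_clear(flag)
#endif

static atomic_flag demo = ATOMIC_FLAG_INIT;
/* usage: demo_lock(&demo); ...critical section...; demo_unlock(&demo); */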