author     Linus Torvalds <torvalds@linux-foundation.org>   2008-11-12 13:38:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-11-12 13:38:42 -0500
commit     5d2007ebc278525cbe8d2e3ef559a4b191ee4ded (patch)
tree       04287151da2ecbd8055703d34b0b572709651137 /arch/ia64/kvm
parent     1c1271850494f06b63ae6b485e2e1b9c27ffb2d1 (diff)
parent     e17d1dc0863767bab8fde4ba9be92c7f79e7fe50 (diff)
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: Fix pit memory leak if unable to allocate irq source id
KVM: ia64: fix vmm_spin_{un}lock for !CONFIG_SMP
KVM: VMX: Set IGMT bit in EPT entry
KVM: Require the PCI subsystem
x86: KVM guest: fix section mismatch warning in kvmclock.c
KVM: ia64: Use guest signal mask when blocking
KVM: MMU: increase per-vcpu rmap cache alloc size
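Only the two ia64 commits in this pull actually touch arch/ia64/kvm; the diffs below cover them. As background for the first item above, the PIT memory-leak fix is the classic error-path cleanup pattern: if a later setup step fails, free the partially constructed object before returning. A minimal sketch of that pattern, assuming kvm_request_irq_source_id() reports failure with a negative value (the surrounding code is illustrative, not the actual x86 fix, which lies outside this tree):

/* Error-path cleanup, per the PIT fix above: if requesting an irq
 * source id fails, the freshly allocated pit must be freed, not leaked. */
struct kvm_pit *pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
if (!pit)
	return NULL;
pit->irq_source_id = kvm_request_irq_source_id(kvm);
if (pit->irq_source_id < 0) {
	kfree(pit);		/* the fix: release pit on this error path */
	return NULL;
}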
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--  arch/ia64/kvm/Kconfig    |  2 ++
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 12 ++++++------
-rw-r--r--  arch/ia64/kvm/vcpu.h     |  5 +++++
3 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 8e99fed6b3fd..f833a0b4188d 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	---help---
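This hunk is the "Require the PCI subsystem" fix: the shared device-assignment code calls into the PCI core, so a KVM build with CONFIG_PCI disabled fails at link time. A hedged sketch of the kind of PCI-core call that creates the dependency (the helper is illustrative, not the actual assignment code; pci_enable_device() and pci_set_master() are real PCI-core functions):

#include <linux/pci.h>

/* Illustrative: assigning a host device to a guest means claiming and
 * enabling it through the PCI core, hence the Kconfig dependency. */
static int example_enable_assigned_device(struct pci_dev *dev)
{
	int r = pci_enable_device(dev);	/* unresolvable without CONFIG_PCI */
	if (r)
		return r;
	pci_set_master(dev);		/* let the device bus-master for DMA */
	return 0;
}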
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3caac477de9e..af1464f7a6ad 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -673,16 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		vcpu_put(vcpu);
-		return -EAGAIN;
+		r = -EAGAIN;
+		goto out;
 	}
 
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
@@ -690,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 	}
 	r = __vcpu_run(vcpu, kvm_run);
-
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
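These two hunks implement "Use guest signal mask when blocking". Before the change, the guest's signal mask was installed only after the KVM_MP_STATE_UNINITIALIZED check, so kvm_vcpu_block() slept under the host mask, and the early return path never restored anything. Hoisting the sigprocmask() save above the check and routing the early exit through the new out: label fixes both problems. A compressed sketch of the resulting control flow (the predicate and run helper are hypothetical stand-ins for the code above):

static int run_with_guest_sigmask(struct kvm_vcpu *vcpu)
{
	sigset_t sigsaved;
	int r;

	if (vcpu->sigset_active)	/* install the guest mask first... */
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!vcpu_ready_to_run(vcpu)) {	/* hypothetical predicate */
		kvm_vcpu_block(vcpu);	/* now blocks under the guest mask */
		r = -EAGAIN;
		goto out;		/* early exit still restores the mask */
	}

	r = do_run(vcpu);		/* hypothetical run helper */
out:
	if (vcpu->sigset_active)	/* ...and always restore the host mask */
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return r;
}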
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 341e3fee280c..e9b2a4e121c0 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define MODE_IND(psr)	\
 	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
 
+#ifndef CONFIG_SMP
+#define _vmm_raw_spin_lock(x)	do {}while(0)
+#define _vmm_raw_spin_unlock(x)	do {}while(0)
+#else
 #define _vmm_raw_spin_lock(x)	\
 	do {	\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);	\
@@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 	do { barrier();	\
 	((spinlock_t *)x)->raw_lock.lock = 0; }	\
 	while (0)
+#endif
 
 void vmm_spin_lock(spinlock_t *lock);
 void vmm_spin_unlock(spinlock_t *lock);
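This is the "fix vmm_spin_{un}lock for !CONFIG_SMP" change: on a uniprocessor kernel the raw_lock.lock field poked by the unlock macro is typically absent from the UP spinlock layout, and there is no other CPU to spin against anyway, so both macros now compile to no-ops, in the same spirit as the kernel's generic UP spinlocks. The same idiom in a self-contained sketch (macro and helper names are illustrative):

#ifdef CONFIG_SMP
#define my_raw_lock(x)		arch_acquire(x)	/* hypothetical SMP path */
#define my_raw_unlock(x)	arch_release(x)
#else
/* One CPU: no other context can hold the lock while we run, so the
 * operations can safely compile away to nothing. */
#define my_raw_lock(x)		do { (void)(x); } while (0)
#define my_raw_unlock(x)	do { (void)(x); } while (0)
#endif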