author     Paolo Bonzini <pbonzini@redhat.com>  2013-09-25 07:53:07 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2013-09-30 03:21:51 -0400
commit     2f303b74a62fb74983c0a66e2df353be963c527c
tree       430c287a89d709fb9d497d0c2a5a33acc434b020
parent     4a937f96f3a29c58b7edd349d2e4dfac371efdf2
KVM: Convert kvm_lock back to non-raw spinlock
In commit e935b8372cf8 ("KVM: Convert kvm_lock to raw_spinlock"), the
kvm_lock was made a raw lock.  However, the kvm mmu_shrink() function
tries to grab the (non-raw) mmu_lock within the scope of the raw locked
kvm_lock being held.  This leads to the following:

BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
in_atomic(): 1, irqs_disabled(): 0, pid: 55, name: kswapd0
Preemption disabled at:
[<ffffffffa0376eac>] mmu_shrink+0x5c/0x1b0 [kvm]

Pid: 55, comm: kswapd0 Not tainted 3.4.34_preempt-rt
Call Trace:
 [<ffffffff8106f2ad>] __might_sleep+0xfd/0x160
 [<ffffffff817d8d64>] rt_spin_lock+0x24/0x50
 [<ffffffffa0376f3c>] mmu_shrink+0xec/0x1b0 [kvm]
 [<ffffffff8111455d>] shrink_slab+0x17d/0x3a0
 [<ffffffff81151f00>] ? mem_cgroup_iter+0x130/0x260
 [<ffffffff8111824a>] balance_pgdat+0x54a/0x730
 [<ffffffff8111fe47>] ? set_pgdat_percpu_threshold+0xa7/0xd0
 [<ffffffff811185bf>] kswapd+0x18f/0x490
 [<ffffffff81070961>] ? get_parent_ip+0x11/0x50
 [<ffffffff81061970>] ? __init_waitqueue_head+0x50/0x50
 [<ffffffff81118430>] ? balance_pgdat+0x730/0x730
 [<ffffffff81060d2b>] kthread+0xdb/0xe0
 [<ffffffff8106e122>] ? finish_task_switch+0x52/0x100
 [<ffffffff817e1e94>] kernel_thread_helper+0x4/0x10
 [<ffffffff81060c50>] ? __init_kthread_worker+0x

After the previous patch, kvm_lock need not be a raw spinlock anymore,
so change it back.

Reported-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: kvm@vger.kernel.org
Cc: gleb@redhat.com
Cc: jan.kiszka@siemens.com
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
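The failure mode in the trace above is the classic PREEMPT_RT pitfall: a
raw_spinlock_t stays a true spinning lock (atomic context, preemption off)
on RT kernels, while a plain spinlock_t is substituted with a sleeping
rt_mutex, so taking the latter while holding the former trips
might_sleep().  A minimal sketch of the invalid nesting, with hypothetical
lock names standing in for kvm_lock (outer) and mmu_lock (inner) in
mmu_shrink() -- this is illustrative, not code from the patch:

    static DEFINE_RAW_SPINLOCK(outer_lock);  /* raw: never sleeps, even on RT */
    static DEFINE_SPINLOCK(inner_lock);      /* on RT: backed by a sleeping rt_mutex */

    static void broken_nesting(void)
    {
            raw_spin_lock(&outer_lock);      /* atomic context from here on */
            spin_lock(&inner_lock);          /* BUG on RT: sleeping lock in atomic context */
            spin_unlock(&inner_lock);
            raw_spin_unlock(&outer_lock);
    }

With kvm_lock converted back to spinlock_t, the outer lock may itself
sleep on RT, so nesting mmu_lock inside it becomes legal again.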
 Documentation/virtual/kvm/locking.txt |  2 +-
 arch/x86/kvm/mmu.c                    |  5 ++---
 arch/x86/kvm/x86.c                    |  8 ++++----
 include/linux/kvm_host.h              |  2 +-
 virt/kvm/kvm_main.c                   | 18 +++++++++---------
 5 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index ba9e1c2150c2..f8869410d40c 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -132,7 +132,7 @@ See the comments in spte_has_volatile_bits() and mmu_spte_update().
 ------------
 
 Name:           kvm_lock
-Type:           raw_spinlock
+Type:           spinlock_t
 Arch:           any
 Protects:       - vm_list
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dce0df8150df..cf95cfe050a6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4428,7 +4428,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         int nr_to_scan = sc->nr_to_scan;
         unsigned long freed = 0;
 
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
 
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 int idx;
@@ -4478,9 +4478,8 @@ unlock:
                 break;
         }
 
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
         return freed;
-
 }
 
 static unsigned long
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e5ca72a5cdb6..187f824b1454 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5263,7 +5263,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
         smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 kvm_for_each_vcpu(i, vcpu, kvm) {
                         if (vcpu->cpu != freq->cpu)
@@ -5273,7 +5273,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
                                 send_ipi = 1;
                 }
         }
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
 
         if (freq->old < freq->new && send_ipi) {
                 /*
@@ -5426,12 +5426,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
         struct kvm_vcpu *vcpu;
         int i;
 
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list)
                 kvm_for_each_vcpu(i, vcpu, kvm)
                         set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
         atomic_set(&kvm_guest_has_master_clock, 0);
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 749bdb12cd15..7c961e1e9270 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -142,7 +142,7 @@ struct kvm;
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern raw_spinlock_t kvm_lock;
+extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eb94343c2ed2..d469114aff09 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,7 +70,7 @@ MODULE_LICENSE("GPL");
  * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_RAW_SPINLOCK(kvm_lock);
+DEFINE_SPINLOCK(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -491,9 +491,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
         if (r)
                 goto out_err;
 
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
         list_add(&kvm->vm_list, &vm_list);
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
 
         return kvm;
 
@@ -582,9 +582,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
         struct mm_struct *mm = kvm->mm;
 
         kvm_arch_sync_events(kvm);
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
         list_del(&kvm->vm_list);
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
         kvm_free_irq_routing(kvm);
         for (i = 0; i < KVM_NR_BUSES; i++)
                 kvm_io_bus_destroy(kvm->buses[i]);
@@ -3054,10 +3054,10 @@ static int vm_stat_get(void *_offset, u64 *val)
         struct kvm *kvm;
 
         *val = 0;
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list)
                 *val += *(u32 *)((void *)kvm + offset);
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
         return 0;
 }
 
@@ -3071,12 +3071,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
         int i;
 
         *val = 0;
-        raw_spin_lock(&kvm_lock);
+        spin_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list)
                 kvm_for_each_vcpu(i, vcpu, kvm)
                         *val += *(u32 *)((void *)vcpu + offset);
 
-        raw_spin_unlock(&kvm_lock);
+        spin_unlock(&kvm_lock);
         return 0;
 }
 