author		Paolo Bonzini <pbonzini@redhat.com>	2013-09-25 07:53:07 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2013-09-30 03:21:51 -0400
commit		2f303b74a62fb74983c0a66e2df353be963c527c (patch)
tree		430c287a89d709fb9d497d0c2a5a33acc434b020 /virt
parent		4a937f96f3a29c58b7edd349d2e4dfac371efdf2 (diff)
KVM: Convert kvm_lock back to non-raw spinlock
In commit e935b8372cf8 ("KVM: Convert kvm_lock to raw_spinlock"),
kvm_lock was made a raw lock. However, the KVM mmu_shrink()
function tries to grab the (non-raw) mmu_lock while the raw
kvm_lock is held. On a PREEMPT_RT kernel, where a non-raw spinlock
is a sleeping rt_mutex, that nesting is invalid and produces the
following splat:
BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
in_atomic(): 1, irqs_disabled(): 0, pid: 55, name: kswapd0
Preemption disabled at:[<ffffffffa0376eac>] mmu_shrink+0x5c/0x1b0 [kvm]
Pid: 55, comm: kswapd0 Not tainted 3.4.34_preempt-rt
Call Trace:
[<ffffffff8106f2ad>] __might_sleep+0xfd/0x160
[<ffffffff817d8d64>] rt_spin_lock+0x24/0x50
[<ffffffffa0376f3c>] mmu_shrink+0xec/0x1b0 [kvm]
[<ffffffff8111455d>] shrink_slab+0x17d/0x3a0
[<ffffffff81151f00>] ? mem_cgroup_iter+0x130/0x260
[<ffffffff8111824a>] balance_pgdat+0x54a/0x730
[<ffffffff8111fe47>] ? set_pgdat_percpu_threshold+0xa7/0xd0
[<ffffffff811185bf>] kswapd+0x18f/0x490
[<ffffffff81070961>] ? get_parent_ip+0x11/0x50
[<ffffffff81061970>] ? __init_waitqueue_head+0x50/0x50
[<ffffffff81118430>] ? balance_pgdat+0x730/0x730
[<ffffffff81060d2b>] kthread+0xdb/0xe0
[<ffffffff8106e122>] ? finish_task_switch+0x52/0x100
[<ffffffff817e1e94>] kernel_thread_helper+0x4/0x10
[<ffffffff81060c50>] ? __init_kthread_worker+0x
After the previous patch, which gave kvm_usage_count its own raw
kvm_count_lock, kvm_lock no longer needs to be a raw spinlock, so
convert it back to an ordinary one.
Reported-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: kvm@vger.kernel.org
Cc: gleb@redhat.com
Cc: jan.kiszka@siemens.com
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
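
For context, the illegal nesting in the splat comes from mmu_shrink() in
arch/x86/kvm/mmu.c, which walks vm_list under kvm_lock and then takes each
VM's (non-raw) mmu_lock. A minimal sketch of the pre-patch shape, simplified
from the 3.x-era shrinker and not the verbatim upstream code:

/* Simplified sketch of the pre-patch mmu_shrink() nesting; illustrative only. */
static int mmu_shrink_sketch(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;

	raw_spin_lock(&kvm_lock);	/* raw lock: preemption off, even on RT */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/*
		 * On PREEMPT_RT a non-raw spin_lock() is an rt_mutex
		 * acquisition and may sleep -- this is the __might_sleep()/
		 * rt_spin_lock() splat in the trace above.
		 */
		spin_lock(&kvm->mmu_lock);
		/* ... zap shadow pages ... */
		spin_unlock(&kvm->mmu_lock);
	}
	raw_spin_unlock(&kvm_lock);
	return 0;
}

On a non-RT kernel both lock types spin, so the nesting goes unnoticed; only
under PREEMPT_RT, where spinlock_t becomes a sleeping lock while
raw_spinlock_t keeps preemption disabled, does it turn into a bug.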
Diffstat (limited to 'virt')
 virt/kvm/kvm_main.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eb94343c2ed2..d469114aff09 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,7 +70,7 @@ MODULE_LICENSE("GPL");
  * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_RAW_SPINLOCK(kvm_lock);
+DEFINE_SPINLOCK(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -491,9 +491,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err;
 
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 
 	return kvm;
 
@@ -582,9 +582,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kvm_io_bus_destroy(kvm->buses[i]);
@@ -3054,10 +3054,10 @@ static int vm_stat_get(void *_offset, u64 *val)
 	struct kvm *kvm;
 
 	*val = 0;
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		*val += *(u32 *)((void *)kvm + offset);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3071,12 +3071,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	int i;
 
 	*val = 0;
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			*val += *(u32 *)((void *)vcpu + offset);
 
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return 0;
 }
 
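
The matching mmu_shrink() conversion lives outside the 'virt' filter shown
here (the same commit also touches arch/x86/kvm/mmu.c and the x86 stat code).
After the change, the iteration nests one sleeping lock inside another, which
is legal on RT; a sketch of the post-patch shape, again simplified rather
than verbatim:

	spin_lock(&kvm_lock);			/* an rt_mutex on RT: may sleep */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/*
		 * A sleeping lock inside another sleeping lock is fine;
		 * the bug was a sleeping lock inside a raw one.
		 */
		spin_lock(&kvm->mmu_lock);
		/* ... */
		spin_unlock(&kvm->mmu_lock);
	}
	spin_unlock(&kvm_lock);

Note that the adjacent kvm_count_lock, introduced by the parent commit and
still declared DEFINE_RAW_SPINLOCK in the first hunk, deliberately stays raw,
likely because it is taken on the hardware enable/disable paths, which run in
contexts where sleeping is not allowed.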