diff options
author:    Jan Kiszka <jan.kiszka@siemens.com>  2011-02-08 06:55:33 -0500
committer: Marcelo Tosatti <mtosatti@redhat.com>  2011-03-17 12:08:30 -0400
commit:    e935b8372cf8c63dc618a9f2b24ab360a225f1cd (patch)
tree:      024c5f7a7f76dab86fc905398ab31e28e5465821 /arch/x86
parent:    bd3d1ec3d26b61120bb4f60b18ee99aa81839e6b (diff)
KVM: Convert kvm_lock to raw_spinlock
Code under this lock requires non-preemptibility. Ensure this also over
-rt by converting it to raw spinlock.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  | 2 +-
-rw-r--r--  arch/x86/kvm/mmu.c               | 4 ++--
-rw-r--r--  arch/x86/kvm/x86.c               | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ffd7f8d29187..a58aebef5188 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,7 +85,7 @@
 
 #define ASYNC_PF_PER_VCPU 64
 
-extern spinlock_t kvm_lock;
+extern raw_spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ccacf0b1b540..b6a9963400a7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3587,7 +3587,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	if (nr_to_scan == 0)
 		goto out;
 
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx, freed_pages;
@@ -3610,7 +3610,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
 
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 
 out:
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd59e8ede88e..d9855b8584cf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4557,7 +4557,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
@@ -4567,7 +4567,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 			send_ipi = 1;
 		}
 	}
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 
 	if (freq->old < freq->new && send_ipi) {
 		/*