about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/include/asm/kvm_host.h  2
-rw-r--r--  arch/x86/kvm/mmu.c               4
-rw-r--r--  arch/x86/kvm/x86.c               4
-rw-r--r--  virt/kvm/kvm_main.c              36
4 files changed, 23 insertions, 23 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ffd7f8d2918..a58aebef518 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,7 +85,7 @@
85 85
86#define ASYNC_PF_PER_VCPU 64 86#define ASYNC_PF_PER_VCPU 64
87 87
88extern spinlock_t kvm_lock; 88extern raw_spinlock_t kvm_lock;
89extern struct list_head vm_list; 89extern struct list_head vm_list;
90 90
91struct kvm_vcpu; 91struct kvm_vcpu;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ccacf0b1b54..b6a9963400a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3587,7 +3587,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3587 if (nr_to_scan == 0) 3587 if (nr_to_scan == 0)
3588 goto out; 3588 goto out;
3589 3589
3590 spin_lock(&kvm_lock); 3590 raw_spin_lock(&kvm_lock);
3591 3591
3592 list_for_each_entry(kvm, &vm_list, vm_list) { 3592 list_for_each_entry(kvm, &vm_list, vm_list) {
3593 int idx, freed_pages; 3593 int idx, freed_pages;
@@ -3610,7 +3610,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3610 if (kvm_freed) 3610 if (kvm_freed)
3611 list_move_tail(&kvm_freed->vm_list, &vm_list); 3611 list_move_tail(&kvm_freed->vm_list, &vm_list);
3612 3612
3613 spin_unlock(&kvm_lock); 3613 raw_spin_unlock(&kvm_lock);
3614 3614
3615out: 3615out:
3616 return percpu_counter_read_positive(&kvm_total_used_mmu_pages); 3616 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd59e8ede88..d9855b8584c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4557,7 +4557,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
4557 4557
4558 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 4558 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4559 4559
4560 spin_lock(&kvm_lock); 4560 raw_spin_lock(&kvm_lock);
4561 list_for_each_entry(kvm, &vm_list, vm_list) { 4561 list_for_each_entry(kvm, &vm_list, vm_list) {
4562 kvm_for_each_vcpu(i, vcpu, kvm) { 4562 kvm_for_each_vcpu(i, vcpu, kvm) {
4563 if (vcpu->cpu != freq->cpu) 4563 if (vcpu->cpu != freq->cpu)
@@ -4567,7 +4567,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
4567 send_ipi = 1; 4567 send_ipi = 1;
4568 } 4568 }
4569 } 4569 }
4570 spin_unlock(&kvm_lock); 4570 raw_spin_unlock(&kvm_lock);
4571 4571
4572 if (freq->old < freq->new && send_ipi) { 4572 if (freq->old < freq->new && send_ipi) {
4573 /* 4573 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2dc53a6dc28..1fa0d292119 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -69,7 +69,7 @@ MODULE_LICENSE("GPL");
69 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 69 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
70 */ 70 */
71 71
72DEFINE_SPINLOCK(kvm_lock); 72DEFINE_RAW_SPINLOCK(kvm_lock);
73LIST_HEAD(vm_list); 73LIST_HEAD(vm_list);
74 74
75static cpumask_var_t cpus_hardware_enabled; 75static cpumask_var_t cpus_hardware_enabled;
@@ -481,9 +481,9 @@ static struct kvm *kvm_create_vm(void)
481 mutex_init(&kvm->irq_lock); 481 mutex_init(&kvm->irq_lock);
482 mutex_init(&kvm->slots_lock); 482 mutex_init(&kvm->slots_lock);
483 atomic_set(&kvm->users_count, 1); 483 atomic_set(&kvm->users_count, 1);
484 spin_lock(&kvm_lock); 484 raw_spin_lock(&kvm_lock);
485 list_add(&kvm->vm_list, &vm_list); 485 list_add(&kvm->vm_list, &vm_list);
486 spin_unlock(&kvm_lock); 486 raw_spin_unlock(&kvm_lock);
487 487
488 return kvm; 488 return kvm;
489 489
@@ -556,9 +556,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
556 struct mm_struct *mm = kvm->mm; 556 struct mm_struct *mm = kvm->mm;
557 557
558 kvm_arch_sync_events(kvm); 558 kvm_arch_sync_events(kvm);
559 spin_lock(&kvm_lock); 559 raw_spin_lock(&kvm_lock);
560 list_del(&kvm->vm_list); 560 list_del(&kvm->vm_list);
561 spin_unlock(&kvm_lock); 561 raw_spin_unlock(&kvm_lock);
562 kvm_free_irq_routing(kvm); 562 kvm_free_irq_routing(kvm);
563 for (i = 0; i < KVM_NR_BUSES; i++) 563 for (i = 0; i < KVM_NR_BUSES; i++)
564 kvm_io_bus_destroy(kvm->buses[i]); 564 kvm_io_bus_destroy(kvm->buses[i]);
@@ -2177,9 +2177,9 @@ static void hardware_enable_nolock(void *junk)
2177 2177
2178static void hardware_enable(void *junk) 2178static void hardware_enable(void *junk)
2179{ 2179{
2180 spin_lock(&kvm_lock); 2180 raw_spin_lock(&kvm_lock);
2181 hardware_enable_nolock(junk); 2181 hardware_enable_nolock(junk);
2182 spin_unlock(&kvm_lock); 2182 raw_spin_unlock(&kvm_lock);
2183} 2183}
2184 2184
2185static void hardware_disable_nolock(void *junk) 2185static void hardware_disable_nolock(void *junk)
@@ -2194,9 +2194,9 @@ static void hardware_disable_nolock(void *junk)
2194 2194
2195static void hardware_disable(void *junk) 2195static void hardware_disable(void *junk)
2196{ 2196{
2197 spin_lock(&kvm_lock); 2197 raw_spin_lock(&kvm_lock);
2198 hardware_disable_nolock(junk); 2198 hardware_disable_nolock(junk);
2199 spin_unlock(&kvm_lock); 2199 raw_spin_unlock(&kvm_lock);
2200} 2200}
2201 2201
2202static void hardware_disable_all_nolock(void) 2202static void hardware_disable_all_nolock(void)
@@ -2210,16 +2210,16 @@ static void hardware_disable_all_nolock(void)
2210 2210
2211static void hardware_disable_all(void) 2211static void hardware_disable_all(void)
2212{ 2212{
2213 spin_lock(&kvm_lock); 2213 raw_spin_lock(&kvm_lock);
2214 hardware_disable_all_nolock(); 2214 hardware_disable_all_nolock();
2215 spin_unlock(&kvm_lock); 2215 raw_spin_unlock(&kvm_lock);
2216} 2216}
2217 2217
2218static int hardware_enable_all(void) 2218static int hardware_enable_all(void)
2219{ 2219{
2220 int r = 0; 2220 int r = 0;
2221 2221
2222 spin_lock(&kvm_lock); 2222 raw_spin_lock(&kvm_lock);
2223 2223
2224 kvm_usage_count++; 2224 kvm_usage_count++;
2225 if (kvm_usage_count == 1) { 2225 if (kvm_usage_count == 1) {
@@ -2232,7 +2232,7 @@ static int hardware_enable_all(void)
2232 } 2232 }
2233 } 2233 }
2234 2234
2235 spin_unlock(&kvm_lock); 2235 raw_spin_unlock(&kvm_lock);
2236 2236
2237 return r; 2237 return r;
2238} 2238}
@@ -2394,10 +2394,10 @@ static int vm_stat_get(void *_offset, u64 *val)
2394 struct kvm *kvm; 2394 struct kvm *kvm;
2395 2395
2396 *val = 0; 2396 *val = 0;
2397 spin_lock(&kvm_lock); 2397 raw_spin_lock(&kvm_lock);
2398 list_for_each_entry(kvm, &vm_list, vm_list) 2398 list_for_each_entry(kvm, &vm_list, vm_list)
2399 *val += *(u32 *)((void *)kvm + offset); 2399 *val += *(u32 *)((void *)kvm + offset);
2400 spin_unlock(&kvm_lock); 2400 raw_spin_unlock(&kvm_lock);
2401 return 0; 2401 return 0;
2402} 2402}
2403 2403
@@ -2411,12 +2411,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
2411 int i; 2411 int i;
2412 2412
2413 *val = 0; 2413 *val = 0;
2414 spin_lock(&kvm_lock); 2414 raw_spin_lock(&kvm_lock);
2415 list_for_each_entry(kvm, &vm_list, vm_list) 2415 list_for_each_entry(kvm, &vm_list, vm_list)
2416 kvm_for_each_vcpu(i, vcpu, kvm) 2416 kvm_for_each_vcpu(i, vcpu, kvm)
2417 *val += *(u32 *)((void *)vcpu + offset); 2417 *val += *(u32 *)((void *)vcpu + offset);
2418 2418
2419 spin_unlock(&kvm_lock); 2419 raw_spin_unlock(&kvm_lock);
2420 return 0; 2420 return 0;
2421} 2421}
2422 2422
@@ -2457,7 +2457,7 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2457static int kvm_resume(struct sys_device *dev) 2457static int kvm_resume(struct sys_device *dev)
2458{ 2458{
2459 if (kvm_usage_count) { 2459 if (kvm_usage_count) {
2460 WARN_ON(spin_is_locked(&kvm_lock)); 2460 WARN_ON(raw_spin_is_locked(&kvm_lock));
2461 hardware_enable_nolock(NULL); 2461 hardware_enable_nolock(NULL);
2462 } 2462 }
2463 return 0; 2463 return 0;