author    Linus Torvalds <torvalds@linux-foundation.org>  2013-01-10 12:05:18 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-01-10 12:05:18 -0500
commit    ccae663cd4f62890d862c660e5ed762eb9821c14 (patch)
tree      7c9f110db7992aad60fcc01718a827affc4f33df /arch
parent    4ffd4ebf9d19c07285ea8a26d30a17ff28767132 (diff)
parent    013f6a5d3dd9e4ebf4b49ca427b9c1f2e2a1b767 (diff)
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM bugfixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: use dynamic percpu allocations for shared msrs area
  KVM: PPC: Book3S HV: Fix compilation without CONFIG_PPC_POWERNV
  powerpc: Corrected include header path in kvm_para.h
  Add rcu user eqs exception hooks for async page fault
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm_para.h |  2 +-
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c         |  4 ++++
-rw-r--r--  arch/x86/kernel/kvm.c                    | 12 ++++++++++--
-rw-r--r--  arch/x86/kvm/x86.c                       | 24 +++++++++++++++++++-----
4 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index ed0e0254b47f..e3af3286a068 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_HCALL_TOKEN(num)	_EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
 
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
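The include fix above follows from how the kernel exports its UAPI headers: files under include/uapi/ (and arch/*/include/uapi/) are installed for userspace with the uapi/ path component stripped, so a UAPI header must name its siblings by their post-install path. A minimal sketch of what the fixed header makes possible (a hypothetical userspace check on a powerpc host, built after `make headers_install`; only the kernel paths named in the comment are real):

/* The installed tree contains asm/epapr_hcalls.h, never
 * uapi/asm/epapr_hcalls.h, so the old uapi/ prefix broke any
 * userspace build that included the exported kvm_para.h. */
#include <asm/kvm_para.h>	/* now pulls in <asm/epapr_hcalls.h> cleanly */

int main(void)
{
	/* constant defined in the header shown in the hunk above */
	return KVM_FEATURE_MAGIC_PAGE == 1 ? 0 : 1;
}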
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 35f3cf0269b3..a353c485808c 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
 	unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
 	struct opal_machine_check_event *opal_evt;
+#endif
 	long handled = 1;
 
 	if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 		handled = 0;
 	}
 
+#ifdef CONFIG_PPC_POWERNV
 	/*
 	 * See if OPAL has already handled the condition.
 	 * We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 
 	if (handled)
 		opal_evt->in_use = 0;
+#endif
 
 	return handled;
 }
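This one is purely a build fix: struct opal_machine_check_event and the OPAL event query exist only on POWERNV platforms, so every reference to them, the local variable declaration included, has to sit inside CONFIG_PPC_POWERNV guards or non-POWERNV configs fail to compile. A minimal self-contained sketch of the pattern (hypothetical names, not the kernel code):

/* Stand-in for the platform-specific type; it only "exists" when the
 * platform is configured in, mirroring the OPAL event in the patch. */
#ifdef CONFIG_PPC_POWERNV
struct mc_event {
	int in_use;
	int recovered;
};
#endif

static long realmode_mc_check(void *platform_evt, long handled)
{
#ifdef CONFIG_PPC_POWERNV
	/* The declaration and every use are fenced together, so a
	 * !POWERNV build never names the platform-specific symbols. */
	struct mc_event *evt = platform_evt;

	if (evt->in_use && evt->recovered)
		handled = 1;
	if (handled)
		evt->in_use = 0;
#endif
	return handled;
}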
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08b973f64032..9c2bd8bd4b4c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
 
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
 	struct kvm_task_sleep_node n, *e;
 	DEFINE_WAIT(wait);
 
+	rcu_irq_enter();
+
 	spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
 	if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
 		hlist_del(&e->link);
 		kfree(e);
 		spin_unlock(&b->lock);
+
+		rcu_irq_exit();
 		return;
 	}
 
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
+			rcu_irq_exit();
 			native_safe_halt();
+			rcu_irq_enter();
 			local_irq_disable();
 		}
 	}
 	if (!n.halted)
 		finish_wait(&n.wq, &wait);
 
+	rcu_irq_exit();
 	return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		/* page is swapped out by the host. */
-		rcu_irq_enter();
+		exception_enter(regs);
 		exit_idle();
 		kvm_async_pf_task_wait((u32)read_cr2());
-		rcu_irq_exit();
+		exception_exit(regs);
 		break;
 	case KVM_PV_REASON_PAGE_READY:
 		rcu_irq_enter();
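The kvm.c changes all serve one invariant: an async page fault can arrive while the CPU is in an RCU extended quiescent state (idle, or userspace with context tracking), where RCU is not watching the CPU. So the wait path brackets its RCU-using code with rcu_irq_enter()/rcu_irq_exit(), and steps back out across native_safe_halt() so the halted CPU still counts as quiescent; do_async_page_fault() makes the same point one level up, where exception_enter()/exception_exit() handle the user-mode bookkeeping once, in one place. A minimal sketch of the pairing (kernel-internal calls, not runnable outside the kernel; wait_not_done() is a hypothetical stand-in for the wait-queue logic):

extern bool wait_not_done(void);	/* hypothetical: "PAGE_READY not yet delivered?" */

static void async_pf_wait_sketch(void)
{
	rcu_irq_enter();		/* RCU may not be watching this CPU */

	while (wait_not_done()) {
		rcu_irq_exit();		/* the halt must look idle to RCU */
		native_safe_halt();	/* sti; hlt until the wakeup interrupt */
		rcu_irq_enter();	/* re-enter before touching RCU-protected state */
		local_irq_disable();
	}

	rcu_irq_exit();			/* every exit path restores the quiescent state */
}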
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 76f54461f7cb..c243b81e3c74 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 
 static void shared_msr_update(unsigned slot, u32 msr)
 {
-	struct kvm_shared_msrs *smsr;
 	u64 value;
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
-	smsr = &__get_cpu_var(shared_msrs);
 	/* only read, and nobody should modify it at this time,
 	 * so don't need lock */
 	if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
 		return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
 static void drop_user_return_notifiers(void *ignore)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (smsr->registered)
 		kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
+	r = -ENOMEM;
+	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+	if (!shared_msrs) {
+		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+		goto out;
+	}
+
 	r = kvm_mmu_module_init();
 	if (r)
-		goto out;
+		goto out_free_percpu;
 
 	kvm_set_mmio_spte_mask();
 	kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
 
 	return 0;
 
+out_free_percpu:
+	free_percpu(shared_msrs);
 out:
 	return r;
 }
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
 #endif
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
+	free_percpu(shared_msrs);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
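The x86.c conversion trades a statically reserved per-CPU image for a dynamic one: DEFINE_PER_CPU carves the structure out of the limited percpu area linked into every kernel, while alloc_percpu() draws from the runtime percpu allocator, can fail (hence the new -ENOMEM path), and must be paired with free_percpu() on both the error path and module exit. A minimal sketch of the before/after access pattern (kernel-internal APIs, illustrative only):

/* before: storage for every possible CPU existed at link time
 *   static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
 *   struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 */

/* after: a percpu pointer, populated once at init */
static struct kvm_shared_msrs __percpu *shared_msrs;

static int sketch_init(void)
{
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);	/* may fail */
	if (!shared_msrs)
		return -ENOMEM;
	return 0;
}

static void sketch_use(void)
{
	/* explicit cpu id + per_cpu_ptr() replaces __get_cpu_var() */
	struct kvm_shared_msrs *smsr =
		per_cpu_ptr(shared_msrs, smp_processor_id());

	smsr->registered = false;	/* field from the real struct */
}

static void sketch_exit(void)
{
	free_percpu(shared_msrs);	/* undo alloc_percpu() */
}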