commit    a4df1ac12dd2d2812713a5fdd56af954d1bc251d
tree      1cf0a68f8960892d8ff83fdb233e79013b954858
parent    2a212f699671c967dd0fad133f62e6f3e721c73d
parent    3c9155106d589584f67b026ec444e69c4a68d7dc
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-06-11 13:35:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-06-11 13:35:44 -0400
Merge branch 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: Fix is_empty_shadow_page() check
  KVM: MMU: Fix printk() format string
  KVM: IOAPIC: only set remote_irr if interrupt was injected
  KVM: MMU: reschedule during shadow teardown
  KVM: VMX: Clear CR4.VMXE in hardware_disable
  KVM: migrate PIT timer
  KVM: ppc: Report bad GFNs
  KVM: ppc: Use a read lock around MMU operations, and release it on error
  KVM: ppc: Remove unmatched kunmap() call
  KVM: ppc: add lwzx/stwx emulation
  KVM: ppc: Remove duplicate function
  KVM: s390: Fix race condition in kvm_s390_handle_wait
  KVM: s390: Send program check on access error
  KVM: s390: fix interrupt delivery
  KVM: s390: handle machine checks when guest is running
  KVM: s390: fix locking order problem in enable_sie
  KVM: s390: use yield instead of schedule to implement diag 0x44
  KVM: x86 emulator: fix hypercall return value on AMD
  KVM: ia64: fix zero extending for mmio ld1/2/4 emulation in KVM
-rw-r--r--  arch/ia64/kvm/mmio.c            |  3
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c      |  9
-rw-r--r--  arch/powerpc/kvm/booke_guest.c  | 33
-rw-r--r--  arch/powerpc/kvm/emulate.c      | 12
-rw-r--r--  arch/s390/kvm/diag.c            |  2
-rw-r--r--  arch/s390/kvm/interrupt.c       |  7
-rw-r--r--  arch/s390/kvm/kvm-s390.c        | 13
-rw-r--r--  arch/s390/mm/pgtable.c          | 44
-rw-r--r--  arch/x86/kvm/i8254.c            | 14
-rw-r--r--  arch/x86/kvm/irq.c              |  6
-rw-r--r--  arch/x86/kvm/irq.h              |  2
-rw-r--r--  arch/x86/kvm/mmu.c              |  3
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      |  2
-rw-r--r--  arch/x86/kvm/svm.c              |  2
-rw-r--r--  arch/x86/kvm/vmx.c              |  3
-rw-r--r--  arch/x86/kvm/x86.c              |  2
-rw-r--r--  arch/x86/kvm/x86_emulate.c      |  3
-rw-r--r--  drivers/s390/s390mach.c         |  1
-rw-r--r--  include/asm-powerpc/kvm_ppc.h   |  1
-rw-r--r--  include/linux/kvm_host.h        |  2
-rw-r--r--  virt/kvm/ioapic.c               | 21
21 files changed, 106 insertions(+), 79 deletions(-)
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 351bf70da463..7f1a858bc69f 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 
 	if (p->u.ioreq.state == STATE_IORESP_READY) {
 		if (dir == IOREQ_READ)
-			*dest = p->u.ioreq.data;
+			/* it's necessary to ensure zero extending */
+			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
 	} else
 		panic_vm(vcpu);
 out:
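
Note on the hunk above: after a narrow (1-, 2- or 4-byte) MMIO read the ioreq data buffer may carry stale high bytes, and `~0UL >> (64-(s*8))` keeps only the low s bytes. A standalone sketch of the mask arithmetic (plain C with illustrative values, not kernel code; ~0ULL is used here so the sketch also behaves correctly on 32-bit hosts, whereas the ia64 code can rely on a 64-bit unsigned long):

    #include <stdint.h>
    #include <stdio.h>

    /* Zero-extend the low `size` bytes of `data` into 64 bits,
     * mirroring the expression added in mmio_access() above. */
    static uint64_t zero_extend(uint64_t data, unsigned int size)
    {
            return data & (~0ULL >> (64 - size * 8));
    }

    int main(void)
    {
            /* a 1-byte load of 0xff must not come back sign-extended */
            printf("%llx\n", (unsigned long long)zero_extend(~0ULL, 1));                  /* ff */
            printf("%llx\n", (unsigned long long)zero_extend(0x1234567890abcdefULL, 4));  /* 90abcdef */
            return 0;
    }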
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index f5d7a5eab96e..75dff7cfa814 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -116,8 +116,6 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
 	struct page *page = vcpu->arch.shadow_pages[index];
 
-	kunmap(vcpu->arch.shadow_pages[index]);
-
 	if (get_tlb_v(stlbe)) {
 		if (kvmppc_44x_tlbe_is_writable(stlbe))
 			kvm_release_page_dirty(page);
@@ -144,18 +142,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe = &vcpu->arch.shadow_tlb[victim];
 
 	/* Get reference to new page. */
-	down_write(&current->mm->mmap_sem);
+	down_read(&current->mm->mmap_sem);
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
-		printk(KERN_ERR "Couldn't get guest page!\n");
+		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 		kvm_release_page_clean(new_page);
+		up_read(&current->mm->mmap_sem);
 		return;
 	}
 	hpaddr = page_to_phys(new_page);
 
 	/* Drop reference to old page. */
 	kvmppc_44x_shadow_release(vcpu, victim);
-	up_write(&current->mm->mmap_sem);
+	up_read(&current->mm->mmap_sem);
 
 	vcpu->arch.shadow_pages[victim] = new_page;
 
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 712d89a28c46..9c8ad850c6e3 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -227,39 +227,6 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
 	}
 }
 
-static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-	enum emulation_result er;
-	int r;
-
-	er = kvmppc_emulate_instruction(run, vcpu);
-	switch (er) {
-	case EMULATE_DONE:
-		/* Future optimization: only reload non-volatiles if they were
-		 * actually modified. */
-		r = RESUME_GUEST_NV;
-		break;
-	case EMULATE_DO_MMIO:
-		run->exit_reason = KVM_EXIT_MMIO;
-		/* We must reload nonvolatiles because "update" load/store
-		 * instructions modify register state. */
-		/* Future optimization: only reload non-volatiles if they were
-		 * actually modified. */
-		r = RESUME_HOST_NV;
-		break;
-	case EMULATE_FAIL:
-		/* XXX Deliver Program interrupt to guest. */
-		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       vcpu->arch.last_inst);
-		r = RESUME_HOST;
-		break;
-	default:
-		BUG();
-	}
-
-	return r;
-}
-
 /**
  * kvmppc_handle_exit
  *
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index a03fe0c80698..000097461283 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -246,6 +246,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case 31:
 		switch (get_xop(inst)) {
 
+		case 23:                                        /* lwzx */
+			rt = get_rt(inst);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+			break;
+
 		case 83:                                        /* mfmsr */
 			rt = get_rt(inst);
 			vcpu->arch.gpr[rt] = vcpu->arch.msr;
@@ -267,6 +272,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
 			break;
 
+		case 151:                                       /* stwx */
+			rs = get_rs(inst);
+			emulated = kvmppc_handle_store(run, vcpu,
+			                               vcpu->arch.gpr[rs],
+			                               4, 1);
+			break;
+
 		case 163:                                       /* wrteei */
 			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
 			                 | (inst & MSR_EE);
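
Note on the two hunks above: both new cases sit under primary opcode 31, where an extended opcode field selects the X-form operation (23 = lwzx, 151 = stwx). A minimal sketch of the field extraction, assuming the standard PowerPC encoding that the kernel's get_op()/get_xop()/get_rt()/get_rs() helpers are presumed to implement:

    #include <stdint.h>
    #include <stdio.h>

    /* PowerPC X-form fields (IBM bit numbering, MSB = bit 0):
     * op = bits 0-5, RT/RS = 6-10, RA = 11-15, RB = 16-20, XO = 21-30. */
    static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
    static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
    static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }

    int main(void)
    {
            uint32_t inst = 0x7c64282e; /* lwzx r3,r4,r5 */
            printf("op=%u xop=%u rt=%u\n",
                   get_op(inst), get_xop(inst), get_rt(inst)); /* op=31 xop=23 rt=3 */
            return 0;
    }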
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869f..a0775e1f08df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
 	vcpu_put(vcpu);
-	schedule();
+	yield();
 	vcpu_load(vcpu);
 	return 0;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c1..84a7fed4cd4e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_interrupt(vcpu))
 		return 0;
 
+	__set_cpu_idle(vcpu);
+	spin_lock_bh(&vcpu->arch.local_int.lock);
+	vcpu->arch.local_int.timer_due = 0;
+	spin_unlock_bh(&vcpu->arch.local_int.lock);
+
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
 		__unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	__set_cpu_idle(vcpu);
-	vcpu->arch.local_int.timer_due = 0;
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 	       list_empty(&vcpu->arch.local_int.float_int->list) &&
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eba..6558b09ff579 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (need_resched())
 		schedule();
 
+	if (test_thread_flag(TIF_MCCK_PENDING))
+		s390_handle_mcck();
+
+	kvm_s390_deliver_pending_interrupts(vcpu);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	local_irq_disable();
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	might_sleep();
 
 	do {
-		kvm_s390_deliver_pending_interrupts(vcpu);
 		__vcpu_run(vcpu);
 		rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
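
Note on the hunk above: the locking-order fix replaces the goto-based error handling with an optimistic check / unlock / allocate / re-check sequence. dup_mm() can sleep, so it must not run under task_lock(); but once the lock is dropped, another thread (ptrace attach, a spawned thread, AIO) can appear before the new mm is installed, which is why the eligibility test runs twice. The skeleton of the pattern, as a sketch only — check(), make_copy(), destroy() and commit() are placeholder names standing in for the real conditions and the dup_mm()/update_mm() work, not kernel APIs:

    task_lock(tsk);
    if (!check(tsk)) {              /* threads, ptrace or AIO attached? */
            task_unlock(tsk);
            return -EINVAL;
    }
    task_unlock(tsk);               /* drop the lock: the copy may sleep */

    copy = make_copy(tsk);
    if (!copy)
            return -ENOMEM;

    task_lock(tsk);
    if (!check(tsk)) {              /* state may have changed while unlocked */
            destroy(copy);
            task_unlock(tsk);
            return -EINVAL;
    }
    commit(tsk, copy);              /* provably alone: safe to swap the mm */
    task_unlock(tsk);
    return 0;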
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 7c077a9d9777..f2f5d260874e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,7 +200,6 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 
 	atomic_inc(&pt->pending);
 	smp_mb__after_atomic_inc();
-	/* FIXME: handle case where the guest is in guest mode */
 	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
 		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(&vcpu0->wq);
@@ -237,6 +236,19 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	return HRTIMER_NORESTART;
 }
 
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+	struct hrtimer *timer;
+
+	if (vcpu->vcpu_id != 0 || !pit)
+		return;
+
+	timer = &pit->pit_state.pit_timer.timer;
+	if (hrtimer_cancel(timer))
+		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+
 static void destroy_pit_timer(struct kvm_kpit_timer *pt)
 {
 	pr_debug("pit: execute del timer!\n");
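
Note on the hunk above: __kvm_migrate_pit_timer() relies on hrtimer_cancel() returning nonzero only when it actually removed an armed timer, so an idle PIT is left alone; re-calling hrtimer_start() from the destination CPU is what requeues the timer on that CPU's clock base, with the expiry itself unchanged (hence HRTIMER_MODE_ABS with the saved timer->expires). The cancel-and-rearm idiom in isolation, as a sketch (`timer` is any struct hrtimer, exactly as in the hunk):

    if (hrtimer_cancel(timer))                   /* nonzero iff the timer was armed */
            hrtimer_start(timer, timer->expires, /* same absolute expiry... */
                          HRTIMER_MODE_ABS);     /* ...requeued on the current CPU */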
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index ce1f583459b1..76d736b5f664 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -94,3 +94,9 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 	/* TODO: PIT, RTC etc. */
 }
 EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+{
+	__kvm_migrate_apic_timer(vcpu);
+	__kvm_migrate_pit_timer(vcpu);
+}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 1802134b836f..2a15be2275c0 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -84,6 +84,8 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
 
 int pit_has_pending_timer(struct kvm_vcpu *vcpu);
 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7246b60afb96..ee3f53098f0c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -658,7 +658,7 @@ static int is_empty_shadow_page(u64 *spt)
 	u64 *end;
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-		if (*pos != shadow_trap_nonpresent_pte) {
+		if (is_shadow_present_pte(*pos)) {
 			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
@@ -1858,6 +1858,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
+		cond_resched();
 	}
 	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
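
Note on the hunk above: the cond_resched() bounds latency when a guest with a large shadow-page set is torn down — each kvm_mmu_zap_page() is cheap, but the loop can run for thousands of iterations, and on a non-preemptible kernel nothing else gets the CPU until it finishes. The general shape, as a sketch (zap_one() is a hypothetical stand-in for kvm_mmu_zap_page()):

    /* long teardown loop in sleepable context: yield between iterations */
    while (!list_empty(&kvm->arch.active_mmu_pages)) {
            zap_one(kvm);
            cond_resched();   /* give the scheduler a chance on each pass */
    }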
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 156fe10288ae..934c7b619396 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -418,7 +418,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	/* mmio */
 	if (is_error_pfn(pfn)) {
-		pgprintk("gfn %x is mmio\n", walker.gfn);
+		pgprintk("gfn %lx is mmio\n", walker.gfn);
 		kvm_release_pfn_clean(pfn);
 		return 1;
 	}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ab22615eee89..6b0d5fa5bab3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -688,7 +688,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
-		kvm_migrate_apic_timer(vcpu);
+		kvm_migrate_timers(vcpu);
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe4db11989c..02efbe75f317 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -608,7 +608,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
-		kvm_migrate_apic_timer(vcpu);
+		kvm_migrate_timers(vcpu);
 		vpid_sync_vcpu_all(vmx);
 	}
 
@@ -1036,6 +1036,7 @@ static void hardware_enable(void *garbage)
 static void hardware_disable(void *garbage)
 {
 	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21338bdb28ff..00acf1301a15 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2758,7 +2758,7 @@ again:
 
 	if (vcpu->requests) {
 		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
-			__kvm_migrate_apic_timer(vcpu);
+			__kvm_migrate_timers(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 				       &vcpu->requests)) {
 			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 8a96320ab071..932f216d890c 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1727,7 +1727,8 @@ twobyte_insn:
 		if (rc)
 			goto done;
 
-		kvm_emulate_hypercall(ctxt->vcpu);
+		/* Let the processor re-execute the fixed hypercall */
+		c->eip = ctxt->vcpu->arch.rip;
 		/* Disable writeback. */
 		c->dst.type = OP_NONE;
 		break;
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5080f343ad74..5bfbe7659830 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -207,6 +207,7 @@ s390_handle_mcck(void)
 		do_exit(SIGSEGV);
 	}
 }
+EXPORT_SYMBOL_GPL(s390_handle_mcck);
 
 /*
  * returns 0 if all registers could be validated
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
index b35a7e3ef978..5a21115228af 100644
--- a/include/asm-powerpc/kvm_ppc.h
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -57,6 +57,7 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
                            u64 asid, u32 flags);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 398978972b7a..092b1b25291d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -297,7 +297,7 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn)
 	return (gpa_t)gfn << PAGE_SHIFT;
 }
 
-static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
 	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
 }
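
Note: taken together with the svm.c, vmx.c, x86.c and irq.c hunks, this rename completes a request-based flow. A sketch of the path, with function placements as shown in this merge:

    /*
     * vcpu migrates to a new CPU (svm_vcpu_load/vmx_vcpu_load):
     *     kvm_migrate_timers(vcpu);             sets KVM_REQ_MIGRATE_TIMER
     * next guest entry (arch/x86/kvm/x86.c):
     *     if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
     *             __kvm_migrate_timers(vcpu);   arch/x86/kvm/irq.c
     *                 -> __kvm_migrate_apic_timer(vcpu);
     *                 -> __kvm_migrate_pit_timer(vcpu);   new in i8254.c
     */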
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 4232fd75dd20..98778cb69c6e 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -45,7 +45,7 @@
 #else
 #define ioapic_debug(fmt, arg...)
 #endif
-static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
 
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 					  unsigned long addr,
@@ -89,8 +89,8 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 	pent = &ioapic->redirtbl[idx];
 
 	if (!pent->fields.mask) {
-		ioapic_deliver(ioapic, idx);
-		if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
+		int injected = ioapic_deliver(ioapic, idx);
+		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
 			pent->fields.remote_irr = 1;
 	}
 	if (!pent->fields.trig_mode)
@@ -133,7 +133,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
-static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
+static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
 			   struct kvm_vcpu *vcpu,
 			   u8 vector, u8 trig_mode, u8 delivery_mode)
 {
@@ -143,7 +143,7 @@ static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
 	ASSERT((delivery_mode == IOAPIC_FIXED) ||
 	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));
 
-	kvm_apic_set_irq(vcpu, vector, trig_mode);
+	return kvm_apic_set_irq(vcpu, vector, trig_mode);
 }
 
 static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
@@ -186,7 +186,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 	return mask;
 }
 
-static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
 	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
@@ -195,7 +195,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
 	u32 deliver_bitmask;
 	struct kvm_vcpu *vcpu;
-	int vcpu_id;
+	int vcpu_id, r = 0;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
@@ -204,7 +204,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
 	if (!deliver_bitmask) {
 		ioapic_debug("no target on destination\n");
-		return;
+		return 0;
 	}
 
 	switch (delivery_mode) {
@@ -216,7 +216,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 		vcpu = ioapic->kvm->vcpus[0];
 #endif
 		if (vcpu != NULL)
-			ioapic_inj_irq(ioapic, vcpu, vector,
+			r = ioapic_inj_irq(ioapic, vcpu, vector,
 				       trig_mode, delivery_mode);
 		else
 			ioapic_debug("null lowest prio vcpu: "
@@ -234,7 +234,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 			deliver_bitmask &= ~(1 << vcpu_id);
 			vcpu = ioapic->kvm->vcpus[vcpu_id];
 			if (vcpu) {
-				ioapic_inj_irq(ioapic, vcpu, vector,
+				r = ioapic_inj_irq(ioapic, vcpu, vector,
 					       trig_mode, delivery_mode);
 			}
 		}
@@ -246,6 +246,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 			     delivery_mode);
 		break;
 	}
+	return r;
 }
 
 void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
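
Note on the ioapic.c hunks: remote_irr models the "interrupt accepted by the LAPIC" latch, which an EOI later clears. If the vector was never injected (kvm_apic_set_irq() found no ready destination) but remote_irr is latched anyway, no EOI ever arrives and the level-triggered line stays masked forever — hence threading the return value from kvm_apic_set_irq() up through ioapic_inj_irq() and ioapic_deliver(). A toy model of the fixed behaviour (standalone C; deliver() and its apic_ready flag are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    /* 1 = vector injected into the (pretend) local APIC, 0 = dropped */
    static int deliver(bool apic_ready)
    {
            return apic_ready ? 1 : 0;
    }

    int main(void)
    {
            bool level_triggered = true;
            bool remote_irr = false;

            int injected = deliver(false);     /* delivery fails this time */
            if (injected && level_triggered)   /* old code lacked `injected &&` */
                    remote_irr = true;

            /* remote_irr stays 0, so the line can still be serviced later */
            printf("remote_irr=%d\n", remote_irr);
            return 0;
    }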