author     Linus Torvalds <torvalds@linux-foundation.org>  2008-10-16 18:36:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-16 18:36:00 -0400
commit     08d19f51f05a68ce89a289320ce4ed96e757df72 (patch)
tree       31c5d718d0aeaff5083fe533cd6e1f9fbbe846bb /arch/powerpc
parent     1c95e1b69073cff5ff179e592fa1a1e182c78a17 (diff)
parent     2381ad241d0bea1253a37f314b270848067640bb (diff)
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (134 commits)
  KVM: ia64: Add intel iommu support for guests.
  KVM: ia64: add directed mmio range support for kvm guests
  KVM: ia64: Make pmt table be able to hold physical mmio entries.
  KVM: Move irqchip_in_kernel() from ioapic.h to irq.h
  KVM: Separate irq ack notification out of arch/x86/kvm/irq.c
  KVM: Change is_mmio_pfn to kvm_is_mmio_pfn, and make it common for all archs
  KVM: Move device assignment logic to common code
  KVM: Device Assignment: Move vtd.c from arch/x86/kvm/ to virt/kvm/
  KVM: VMX: enable invlpg exiting if EPT is disabled
  KVM: x86: Silence various LAPIC-related host kernel messages
  KVM: Device Assignment: Map mmio pages into VT-d page table
  KVM: PIC: enhance IPI avoidance
  KVM: MMU: add "oos_shadow" parameter to disable oos
  KVM: MMU: speed up mmu_unsync_walk
  KVM: MMU: out of sync shadow core
  KVM: MMU: mmu_convert_notrap helper
  KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
  KVM: MMU: mmu_parent_walk
  KVM: x86: trap invlpg
  KVM: MMU: sync roots on mmu reload
  ...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h   14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h    12
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c      4
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c            53
-rw-r--r--  arch/powerpc/kvm/Kconfig              11
-rw-r--r--  arch/powerpc/kvm/Makefile              6
-rw-r--r--  arch/powerpc/kvm/booke_guest.c        17
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S   79
-rw-r--r--  arch/powerpc/kvm/emulate.c             8
-rw-r--r--  arch/powerpc/kvm/powerpc.c            99
10 files changed, 246 insertions, 57 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2655e2a4831e..34b52b7180cd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -81,11 +81,17 @@ struct kvm_vcpu_arch {
 	struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
 	/* Pages which are referenced in the shadow TLB. */
 	struct page *shadow_pages[PPC44x_TLB_SIZE];
-	/* Copy of the host's TLB. */
-	struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+	/* Track which TLB entries we've modified in the current exit. */
+	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
 
 	u32 host_stack;
 	u32 host_pid;
+	u32 host_dbcr0;
+	u32 host_dbcr1;
+	u32 host_dbcr2;
+	u32 host_iac[4];
+	u32 host_msr;
 
 	u64 fpr[32];
 	u32 gpr[32];
@@ -123,7 +129,11 @@ struct kvm_vcpu_arch {
 	u32 ivor[16];
 	u32 ivpr;
 	u32 pir;
+
+	u32 shadow_pid;
 	u32 pid;
+	u32 swap_pid;
+
 	u32 pvr;
 	u32 ccr0;
 	u32 ccr1;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a8b068792260..8931ba729d2b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -64,6 +64,10 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
 extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                   gva_t eend, u32 asid);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+/* XXX Book E specific */
+extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
 
 extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
 
@@ -92,4 +96,12 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 	kvm_vcpu_block(vcpu);
 }
 
+static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+	if (vcpu->arch.pid != new_pid) {
+		vcpu->arch.pid = new_pid;
+		vcpu->arch.swap_pid = 1;
+	}
+}
+
 #endif /* __POWERPC_KVM_PPC_H__ */
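
For illustration (this sketch is not part of the merged patch): the new kvmppc_set_pid() helper above only records the guest's PID write and raises swap_pid; the costly shadow-TLB flush is deferred to kvmppc_mmu_priv_switch(), as the emulate.c and 44x_tlb.c hunks below show. A condensed sketch of that call flow; the sketch_* wrappers are hypothetical:

/* Illustrative sketch only; it condenses hunks that appear later in this
 * diff and is not itself part of the merge. */
static int sketch_emulate_mtspr_pid(struct kvm_vcpu *vcpu, int rs)
{
        /* Guest wrote SPRN_PID: just record it, no TLB work happens here. */
        kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]);
        return EMULATE_DONE;
}

static void sketch_guest_mode_switch(struct kvm_vcpu *vcpu, int usermode)
{
        /* The flush is paid once here: kvmppc_mmu_priv_switch() clears the
         * shadow TLB only if swap_pid was raised since the last switch. */
        kvmppc_mmu_priv_switch(vcpu, usermode);
}
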
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 09febc582584..75c5dd0138fd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -359,8 +359,8 @@ int main(void)
 
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
-	DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
 	DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+	DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
@@ -372,7 +372,7 @@ int main(void)
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
-	DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5a5602da5091..2e227a412bc2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -19,6 +19,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
 #include <asm/mmu-44x.h>
@@ -109,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
 	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
 }
 
-/* Must be called with mmap_sem locked for writing. */
 static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                       unsigned int index)
 {
@@ -124,6 +124,11 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+{
+	vcpu->arch.shadow_tlb_mod[i] = 1;
+}
+
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB. */
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
@@ -142,19 +147,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe = &vcpu->arch.shadow_tlb[victim];
 
 	/* Get reference to new page. */
-	down_read(&current->mm->mmap_sem);
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
 		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 		kvm_release_page_clean(new_page);
-		up_read(&current->mm->mmap_sem);
 		return;
 	}
 	hpaddr = page_to_phys(new_page);
 
 	/* Drop reference to old page. */
 	kvmppc_44x_shadow_release(vcpu, victim);
-	up_read(&current->mm->mmap_sem);
 
 	vcpu->arch.shadow_pages[victim] = new_page;
 
@@ -164,27 +166,30 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 
 	/* XXX what about AS? */
 
-	stlbe->tid = asid & 0xff;
+	stlbe->tid = !(asid & 0xff);
 
 	/* Force TS=1 for all guest mappings. */
 	/* For now we hardcode 4KB mappings, but it will be important to
 	 * use host large pages in the future. */
 	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
 	               | PPC44x_TLB_4K;
-
 	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
 	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
 	                                            vcpu->arch.msr & MSR_PR);
+	kvmppc_tlbe_set_modified(vcpu, victim);
+
+	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
+	            stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
+	            handler);
 }
 
 void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                            gva_t eend, u32 asid)
 {
-	unsigned int pid = asid & 0xff;
+	unsigned int pid = !(asid & 0xff);
 	int i;
 
 	/* XXX Replace loop with fancy data structures. */
-	down_write(&current->mm->mmap_sem);
 	for (i = 0; i <= tlb_44x_hwater; i++) {
 		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
 		unsigned int tid;
@@ -204,21 +209,35 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 		kvmppc_44x_shadow_release(vcpu, i);
 		stlbe->word0 = 0;
+		kvmppc_tlbe_set_modified(vcpu, i);
+		KVMTRACE_5D(STLB_INVAL, vcpu, i,
+		            stlbe->tid, stlbe->word0, stlbe->word1,
+		            stlbe->word2, handler);
 	}
-	up_write(&current->mm->mmap_sem);
 }
 
-/* Invalidate all mappings, so that when they fault back in they will get the
- * proper permission bits. */
+/* Invalidate all mappings on the privilege switch after PID has been changed.
+ * The guest always runs with PID=1, so we must clear the entire TLB when
+ * switching address spaces. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
 	int i;
 
-	/* XXX Replace loop with fancy data structures. */
-	down_write(&current->mm->mmap_sem);
-	for (i = 0; i <= tlb_44x_hwater; i++) {
-		kvmppc_44x_shadow_release(vcpu, i);
-		vcpu->arch.shadow_tlb[i].word0 = 0;
+	if (vcpu->arch.swap_pid) {
+		/* XXX Replace loop with fancy data structures. */
+		for (i = 0; i <= tlb_44x_hwater; i++) {
+			struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+
+			/* Future optimization: clear only userspace mappings. */
+			kvmppc_44x_shadow_release(vcpu, i);
+			stlbe->word0 = 0;
+			kvmppc_tlbe_set_modified(vcpu, i);
+			KVMTRACE_5D(STLB_INVAL, vcpu, i,
+			            stlbe->tid, stlbe->word0, stlbe->word1,
+			            stlbe->word2, handler);
+		}
+		vcpu->arch.swap_pid = 0;
 	}
-	up_write(&current->mm->mmap_sem);
+
+	vcpu->arch.shadow_pid = !usermode;
 }
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 6b076010213b..53aaa66b25e5 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -37,6 +37,17 @@ config KVM_BOOKE_HOST
 	  Provides host support for KVM on Book E PowerPC processors. Currently
 	  this works on 440 processors only.
 
+config KVM_TRACE
+	bool "KVM trace support"
+	depends on KVM && MARKERS && SYSFS
+	select RELAY
+	select DEBUG_FS
+	default n
+	---help---
+	  This option allows reading a trace of kvm-related events through
+	  relayfs. Note the ABI is not considered stable and will be
+	  modified in future updates.
+
 source drivers/virtio/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 04e3449e1f42..2a5d4397ac4b 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,9 +4,11 @@
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
-kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+
+kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o
 obj-$(CONFIG_KVM) += kvm.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 9c8ad850c6e3..7b2591e26bae 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -410,6 +410,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 	}
 
+	case BOOKE_INTERRUPT_DEBUG: {
+		u32 dbsr;
+
+		vcpu->arch.pc = mfspr(SPRN_CSRR0);
+
+		/* clear IAC events in DBSR register */
+		dbsr = mfspr(SPRN_DBSR);
+		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
+		mtspr(SPRN_DBSR, dbsr);
+
+		run->exit_reason = KVM_EXIT_DEBUG;
+		r = RESUME_HOST;
+		break;
+	}
+
 	default:
 		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
 		BUG();
@@ -471,6 +486,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.msr = 0;
 	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
 
+	vcpu->arch.shadow_pid = 1;
+
 	/* Eye-catching number so we know if the guest takes an interrupt
 	 * before it's programmed its own IVPR. */
 	vcpu->arch.ivpr = 0x55550000;
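
The new BOOKE_INTERRUPT_DEBUG case above bounces the vcpu out to userspace with exit_reason set to KVM_EXIT_DEBUG. A minimal sketch of how a host-side run loop might react, assuming a vcpu fd and an mmap'ed struct kvm_run obtained the usual way (illustrative userspace code, not part of the patch):

/* Hypothetical userspace fragment; only KVM_RUN and KVM_EXIT_DEBUG are taken
 * from the kernel ABI, the rest is an assumed debugger stub. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void sketch_run_vcpu(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        break;

                if (run->exit_reason == KVM_EXIT_DEBUG) {
                        /* One of the guest IAC breakpoints fired; hand control
                         * to the debugger front end here. */
                        break;
                }
                /* ... handle MMIO, DCR, and other exit reasons ... */
        }
}
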
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 3b653b5309b8..95e165baf85f 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -42,7 +42,8 @@
 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
 
 #define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
-                        (1<<BOOKE_INTERRUPT_DTLB_MISS))
+                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
+                        (1<<BOOKE_INTERRUPT_DEBUG))
 
 #define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                         (1<<BOOKE_INTERRUPT_DTLB_MISS))
@@ -331,51 +332,57 @@ lightweight_exit:
 
         mfspr   r3, SPRN_PID
         stw     r3, VCPU_HOST_PID(r4)
-        lwz     r3, VCPU_PID(r4)
+        lwz     r3, VCPU_SHADOW_PID(r4)
         mtspr   SPRN_PID, r3
 
-        /* Prevent all TLB updates. */
+        /* Prevent all asynchronous TLB updates. */
         mfmsr   r5
         lis     r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
         ori     r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
         andc    r6, r5, r6
         mtmsr   r6
 
-        /* Save the host's non-pinned TLB mappings, and load the guest mappings
-         * over them. Leave the host's "pinned" kernel mappings in place. */
-        /* XXX optimization: use generation count to avoid swapping unmodified
-         * entries. */
+        /* Load the guest mappings, leaving the host's "pinned" kernel mappings
+         * in place. */
         mfspr   r10, SPRN_MMUCR                 /* Save host MMUCR. */
-        lis     r8, tlb_44x_hwater@ha
-        lwz     r8, tlb_44x_hwater@l(r8)
-        addi    r3, r4, VCPU_HOST_TLB - 4
-        addi    r9, r4, VCPU_SHADOW_TLB - 4
-        li      r6, 0
+        li      r5, PPC44x_TLB_SIZE
+        lis     r5, tlb_44x_hwater@ha
+        lwz     r5, tlb_44x_hwater@l(r5)
+        mtctr   r5
+        addi    r9, r4, VCPU_SHADOW_TLB
+        addi    r5, r4, VCPU_SHADOW_MOD
+        li      r3, 0
 1:
-        /* Save host entry. */
-        tlbre   r7, r6, PPC44x_TLB_PAGEID
-        mfspr   r5, SPRN_MMUCR
-        stwu    r5, 4(r3)
-        stwu    r7, 4(r3)
-        tlbre   r7, r6, PPC44x_TLB_XLAT
-        stwu    r7, 4(r3)
-        tlbre   r7, r6, PPC44x_TLB_ATTRIB
-        stwu    r7, 4(r3)
+        lbzx    r7, r3, r5
+        cmpwi   r7, 0
+        beq     3f
+
         /* Load guest entry. */
-        lwzu    r7, 4(r9)
+        mulli   r11, r3, TLBE_BYTES
+        add     r11, r11, r9
+        lwz     r7, 0(r11)
         mtspr   SPRN_MMUCR, r7
-        lwzu    r7, 4(r9)
-        tlbwe   r7, r6, PPC44x_TLB_PAGEID
-        lwzu    r7, 4(r9)
-        tlbwe   r7, r6, PPC44x_TLB_XLAT
-        lwzu    r7, 4(r9)
-        tlbwe   r7, r6, PPC44x_TLB_ATTRIB
-        /* Increment index. */
-        addi    r6, r6, 1
-        cmpw    r6, r8
-        blt     1b
+        lwz     r7, 4(r11)
+        tlbwe   r7, r3, PPC44x_TLB_PAGEID
+        lwz     r7, 8(r11)
+        tlbwe   r7, r3, PPC44x_TLB_XLAT
+        lwz     r7, 12(r11)
+        tlbwe   r7, r3, PPC44x_TLB_ATTRIB
+3:
+        addi    r3, r3, 1                       /* Increment index. */
+        bdnz    1b
+
         mtspr   SPRN_MMUCR, r10                 /* Restore host MMUCR. */
 
+        /* Clear bitmap of modified TLB entries */
+        li      r5, PPC44x_TLB_SIZE>>2
+        mtctr   r5
+        addi    r5, r4, VCPU_SHADOW_MOD - 4
+        li      r6, 0
+1:
+        stwu    r6, 4(r5)
+        bdnz    1b
+
         iccci   0, 0 /* XXX hack */
 
         /* Load some guest volatiles. */
@@ -431,6 +438,14 @@ lightweight_exit:
         oris    r3, r3, KVMPPC_MSR_MASK@h
         ori     r3, r3, KVMPPC_MSR_MASK@l
         mtsrr1  r3
+
+        /* Clear any debug events which occurred since we disabled MSR[DE].
+         * XXX This gives us a 3-instruction window in which a breakpoint
+         * intended for guest context could fire in the host instead. */
+        lis     r3, 0xffff
+        ori     r3, r3, 0xffff
+        mtspr   SPRN_DBSR, r3
+
         lwz     r3, VCPU_GPR(r3)(r4)
         lwz     r4, VCPU_GPR(r4)(r4)
         rfi
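
For readers who do not want to trace the Book E assembly, the rewritten lightweight_exit TLB path above corresponds roughly to the following C, assuming TLBE_BYTES is sizeof(struct tlbe). This is an illustrative rendering only; the assembly is authoritative, including its exact loop bound:

/* Rough C equivalent of the new guest-TLB load path (illustration only). */
static void sketch_lightweight_exit_tlb_load(struct kvm_vcpu *vcpu)
{
        u32 host_mmucr = mfspr(SPRN_MMUCR);     /* save host MMUCR */
        int i;

        for (i = 0; i < tlb_44x_hwater; i++) {
                struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];

                if (!vcpu->arch.shadow_tlb_mod[i])
                        continue;               /* slot unchanged: skip the tlbwe */

                /* tid goes through MMUCR; word0/1/2 go through tlbwe into the
                 * PAGEID, XLAT and ATTRIB words of hardware entry i. */
                mtspr(SPRN_MMUCR, stlbe->tid);
                /* tlbwe stlbe->word0/word1/word2 ... */
        }

        mtspr(SPRN_MMUCR, host_mmucr);          /* restore host MMUCR */

        /* Second loop in the assembly: clear the whole dirty bitmap. */
        memset(vcpu->arch.shadow_tlb_mod, 0, sizeof(vcpu->arch.shadow_tlb_mod));
}
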
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 8c605d0a5488..0fce4fbdc20d 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -170,6 +170,10 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
 		kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
 	}
 
+	KVMTRACE_5D(GTLB_WRITE, vcpu, index,
+	            tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
+	            handler);
+
 	return EMULATE_DONE;
 }
 
@@ -504,7 +508,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case SPRN_MMUCR:
 			vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
 		case SPRN_PID:
-			vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+			kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
 		case SPRN_CCR0:
 			vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
 		case SPRN_CCR1:
@@ -765,6 +769,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 	}
 
+	KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);
+
 	if (advance)
 		vcpu->arch.pc += 4; /* Advance past emulated instruction. */
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 53826a5f6c06..90a6fc422b23 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
 
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -239,18 +240,114 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE].
+ */
+static void kvmppc_disable_debug_interrupts(void)
+{
+	mtmsr(mfmsr() & ~MSR_DE);
+}
+
+static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
+{
+	kvmppc_disable_debug_interrupts();
+
+	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+	mtmsr(vcpu->arch.host_msr);
+}
+
+static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
+{
+	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+	u32 dbcr0 = 0;
+
+	vcpu->arch.host_msr = mfmsr();
+	kvmppc_disable_debug_interrupts();
+
+	/* Save host debug register state. */
+	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+	/* set registers up for guest */
+
+	if (dbg->bp[0]) {
+		mtspr(SPRN_IAC1, dbg->bp[0]);
+		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+	}
+	if (dbg->bp[1]) {
+		mtspr(SPRN_IAC2, dbg->bp[1]);
+		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+	}
+	if (dbg->bp[2]) {
+		mtspr(SPRN_IAC3, dbg->bp[2]);
+		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+	}
+	if (dbg->bp[3]) {
+		mtspr(SPRN_IAC4, dbg->bp[3]);
+		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+	}
+
+	mtspr(SPRN_DBCR0, dbcr0);
+	mtspr(SPRN_DBCR1, 0);
+	mtspr(SPRN_DBCR2, 0);
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int i;
+
+	if (vcpu->guest_debug.enabled)
+		kvmppc_load_guest_debug_registers(vcpu);
+
+	/* Mark every guest entry in the shadow TLB entry modified, so that they
+	 * will all be reloaded on the next vcpu run (instead of being
+	 * demand-faulted). */
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_tlbe_set_modified(vcpu, i);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->guest_debug.enabled)
+		kvmppc_restore_host_debug_state(vcpu);
+
+	/* Don't leave guest TLB entries resident when being de-scheduled. */
+	/* XXX It would be nice to differentiate between heavyweight exit and
+	 * sched_out here, since we could avoid the TLB flush for heavyweight
+	 * exits. */
+	_tlbia();
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                     struct kvm_debug_guest *dbg)
 {
-	return -ENOTSUPP;
+	int i;
+
+	vcpu->guest_debug.enabled = dbg->enabled;
+	if (vcpu->guest_debug.enabled) {
+		for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
+			if (dbg->breakpoints[i].enabled)
+				vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
+			else
+				vcpu->guest_debug.bp[i] = 0;
+		}
+	}
+
+	return 0;
 }
 
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
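
kvm_arch_vcpu_ioctl_debug_guest() above is reached from userspace. A hypothetical sketch of how a debugger front end could arm the first IAC breakpoint slot, assuming the KVM_DEBUG_GUEST ioctl and the struct kvm_debug_guest layout of this era; only the enabled, breakpoints[].enabled, and breakpoints[].address fields are taken from the hunk, everything else is an assumption:

/* Hypothetical userspace usage; not part of the merged patch. */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int sketch_set_guest_breakpoint(int vcpu_fd, unsigned long guest_addr)
{
        struct kvm_debug_guest dbg;

        memset(&dbg, 0, sizeof(dbg));
        dbg.enabled = 1;
        dbg.breakpoints[0].enabled = 1;         /* ends up in IAC1 on vcpu_load */
        dbg.breakpoints[0].address = guest_addr;

        /* The remaining breakpoint slots stay disabled (address 0). */
        return ioctl(vcpu_fd, KVM_DEBUG_GUEST, &dbg);
}
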