author     Hollis Blanchard <hollisb@us.ibm.com>    2008-04-17 00:28:09 -0400
committer  Avi Kivity <avi@qumranet.com>            2008-04-27 11:21:39 -0400
commit     bbf45ba57eaec56569918a8bab96ab653bd45ec1 (patch)
tree       63c53b1c1d93ec6559c7695c16b2345238e270f5
parent     513014b717203d1d689652d0fda86eee959a6a8a (diff)
KVM: ppc: PowerPC 440 KVM implementation
This functionality is definitely experimental, but is capable of running
unmodified PowerPC 440 Linux kernels as guests on a PowerPC 440 host. (Only
tested with 440EP "Bamboo" guests so far, but with appropriate userspace
support other SoC/board combinations should work.)
See Documentation/powerpc/kvm_440.txt for technical details.
[stephen: build fix]
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  Documentation/powerpc/kvm_440.txt   |  41
-rw-r--r--  arch/powerpc/Kconfig                |   1
-rw-r--r--  arch/powerpc/Kconfig.debug          |   3
-rw-r--r--  arch/powerpc/Makefile               |   1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c   |  28
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c          | 224
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h          |  91
-rw-r--r--  arch/powerpc/kvm/Kconfig            |  42
-rw-r--r--  arch/powerpc/kvm/Makefile           |  15
-rw-r--r--  arch/powerpc/kvm/booke_guest.c      | 615
-rw-r--r--  arch/powerpc/kvm/booke_host.c       |  83
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 436
-rw-r--r--  arch/powerpc/kvm/emulate.c          | 760
-rw-r--r--  arch/powerpc/kvm/powerpc.c          | 436
-rw-r--r--  include/asm-powerpc/kvm.h           |  53
-rw-r--r--  include/asm-powerpc/kvm_asm.h       |  55
-rw-r--r--  include/asm-powerpc/kvm_host.h      | 152
-rw-r--r--  include/asm-powerpc/kvm_para.h      |  37
-rw-r--r--  include/asm-powerpc/kvm_ppc.h       |  88
19 files changed, 3159 insertions(+), 2 deletions(-)
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
new file mode 100644
index 000000000000..c02a003fa03a
--- /dev/null
+++ b/Documentation/powerpc/kvm_440.txt
@@ -0,0 +1,41 @@
Hollis Blanchard <hollisb@us.ibm.com>
15 Apr 2008

Various notes on the implementation of KVM for PowerPC 440:

To enforce isolation, host userspace, guest kernel, and guest userspace all
run at user privilege level. Only the host kernel runs in supervisor mode.
Executing privileged instructions in the guest traps into KVM (in the host
kernel), where we decode and emulate them. Through this technique, unmodified
440 Linux kernels can be run (slowly) as guests. Future performance work will
focus on reducing the overhead and frequency of these traps.

The usual code flow is started from userspace invoking a "run" ioctl, which
causes KVM to switch into guest context. We use IVPR to hijack the host
interrupt vectors while running the guest, which allows us to direct all
interrupts to kvmppc_handle_interrupt(). At this point, we could either
- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
- let the host interrupt handler run (e.g. when the decrementer fires), or
- return to host userspace (e.g. when the guest performs device MMIO)

Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
address space (in host or guest), which gives us virtual address space to use
for guest mappings. While the guest is running, the host kernel remains mapped
in AS=0, but the guest can only use AS=1 mappings.

TLB entries: The TLB entries covering the host linear mapping remain
present while running the guest. This reduces the overhead of lightweight
exits, which are handled by KVM running in the host kernel. We keep three
copies of the TLB:
- guest TLB: contents of the TLB as the guest sees it
- shadow TLB: the TLB that is actually in hardware while the guest is running
- host TLB: to restore TLB state when context switching guest -> host
When a TLB miss occurs because a mapping was not present in the shadow TLB,
but was present in the guest TLB, KVM handles the fault without invoking the
guest. Large guest pages are backed by multiple 4KB shadow pages through this
mechanism.

IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
and block IO, so those drivers must be enabled in the guest. It's possible
that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
little effort.
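
As an illustration of the "run" ioctl flow described above, here is a minimal
host-userspace skeleton. It is not part of this patch: it assumes only the
standard /dev/kvm character-device interface plus the KVM_EXIT_DCR exit reason
this series adds, and it elides guest memory setup, register initialization,
and error handling.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(0, sz, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);

        for (;;) {
                ioctl(vcpu, KVM_RUN, 0); /* returns on a heavyweight exit */
                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        /* emulate the device access at run->mmio.phys_addr */
                        break;
                case KVM_EXIT_DCR:
                        /* emulate the 440 on-chip (DCR) register access */
                        break;
                case KVM_EXIT_INTR:
                        break; /* interrupted by a signal; just re-enter */
                default:
                        return 1;
                }
        }
}

Lightweight exits (shadow TLB refills, most privileged-instruction emulation)
never return from KVM_RUN; only MMIO, DCR, and signal exits reach this loop.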
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 20f45a8b87e3..4e40c122bf26 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -803,3 +803,4 @@ config PPC_CLOCK
 config PPC_LIB_RHEAP
         bool
 
+source "arch/powerpc/kvm/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a86d8d853214..807a2dce6263 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -151,6 +151,9 @@ config BOOTX_TEXT
 
 config PPC_EARLY_DEBUG
         bool "Early debugging (dangerous)"
+        # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
+        # mark, which doesn't work with current 440 KVM.
+        depends on !KVM
         help
           Say Y to enable some early debugging facilities that may be available
           for your processor/board combination. Those facilities are hacks
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e2ec4a91ccef..9dcdc036cdf7 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -145,6 +145,7 @@ core-y += arch/powerpc/kernel/ \
                                    arch/powerpc/platforms/
 core-$(CONFIG_MATH_EMULATION)  += arch/powerpc/math-emu/
 core-$(CONFIG_XMON)            += arch/powerpc/xmon/
+core-$(CONFIG_KVM)             += arch/powerpc/kvm/
 
 drivers-$(CONFIG_OPROFILE)     += arch/powerpc/oprofile/
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index adf1d09d726f..62134845af08 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,6 +23,9 @@
 #include <linux/mm.h>
 #include <linux/suspend.h>
 #include <linux/hrtimer.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm_host.h>
+#endif
 #ifdef CONFIG_PPC64
 #include <linux/time.h>
 #include <linux/hardirq.h>
@@ -324,5 +327,30 @@ int main(void)
 
         DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
 
+#ifdef CONFIG_KVM
+        DEFINE(TLBE_BYTES, sizeof(struct tlbe));
+
+        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
+        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+        DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
+        DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
+        DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+        DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+        DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+        DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+        DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+        DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
+        DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
+        DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
+        DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
+        DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+        DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+
+        DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+        DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+        DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif
+
         return 0;
 }
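
The DEFINE() entries above become plain assembler constants: asm-offsets.c is
compiled and its output scraped to generate asm-offsets.h, which is what lets
booke_interrupts.S address vcpu fields without knowing the C struct layout.
The offsets below are hypothetical, shown only for shape:

/* Hypothetical generated asm-offsets.h fragment; real values depend on the
 * actual struct kvm_vcpu layout. */
#define VCPU_HOST_STACK 16
#define VCPU_GPRS       64
#define VCPU_LR         196
/* ...so assembly can write, e.g.:  stw r3, VCPU_LR(r4)  */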
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
new file mode 100644
index 000000000000..f5d7a5eab96e
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -0,0 +1,224 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"

#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

static unsigned int kvmppc_tlb_44x_pos;

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
        /* Mask off reserved bits. */
        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode, so we need to translate guest
                 * supervisor permissions into user permissions. */
                attrib &= ~PPC44x_TLB_USER_PERM_MASK;
                attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
        }

        /* Make sure host can always access this memory. */
        attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

        return attrib;
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < PPC44x_TLB_SIZE; i++) {
                struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as)
                        continue;

                return i;
        }

        return -1;
}

struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);
        unsigned int index;

        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
        if (index == -1)
                return NULL;
        return &vcpu->arch.guest_tlb[index];
}

struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);
        unsigned int index;

        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
        if (index == -1)
                return NULL;
        return &vcpu->arch.guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
{
        return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}

/* Must be called with mmap_sem locked for writing. */
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                      unsigned int index)
{
        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
        struct page *page = vcpu->arch.shadow_pages[index];

        kunmap(vcpu->arch.shadow_pages[index]);

        if (get_tlb_v(stlbe)) {
                if (kvmppc_44x_tlbe_is_writable(stlbe))
                        kvm_release_page_dirty(page);
                else
                        kvm_release_page_clean(page);
        }
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                    u32 flags)
{
        struct page *new_page;
        struct tlbe *stlbe;
        hpa_t hpaddr;
        unsigned int victim;

        /* Future optimization: don't overwrite the TLB entry containing the
         * current PC (or stack?). */
        victim = kvmppc_tlb_44x_pos++;
        if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
                kvmppc_tlb_44x_pos = 0;
        stlbe = &vcpu->arch.shadow_tlb[victim];

        /* Get reference to new page. */
        down_write(&current->mm->mmap_sem);
        new_page = gfn_to_page(vcpu->kvm, gfn);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page!\n");
                kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);

        /* Drop reference to old page. */
        kvmppc_44x_shadow_release(vcpu, victim);
        up_write(&current->mm->mmap_sem);

        vcpu->arch.shadow_pages[victim] = new_page;

        /* XXX Make sure (va, size) doesn't overlap any other
         * entries. 440x6 user manual says the result would be
         * "undefined." */

        /* XXX what about AS? */

        stlbe->tid = asid & 0xff;

        /* Force TS=1 for all guest mappings. */
        /* For now we hardcode 4KB mappings, but it will be important to
         * use host large pages in the future. */
        stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
                       | PPC44x_TLB_4K;

        stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
        stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
                                                    vcpu->arch.msr & MSR_PR);
}

void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
{
        unsigned int pid = asid & 0xff;
        int i;

        /* XXX Replace loop with fancy data structures. */
        down_write(&current->mm->mmap_sem);
        for (i = 0; i <= tlb_44x_hwater; i++) {
                struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
                unsigned int tid;

                if (!get_tlb_v(stlbe))
                        continue;

                if (eaddr < get_tlb_eaddr(stlbe))
                        continue;

                if (eaddr > get_tlb_end(stlbe))
                        continue;

                tid = get_tlb_tid(stlbe);
                if (tid && (tid != pid))
                        continue;

                kvmppc_44x_shadow_release(vcpu, i);
                stlbe->word0 = 0;
        }
        up_write(&current->mm->mmap_sem);
}

/* Invalidate all mappings, so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        int i;

        /* XXX Replace loop with fancy data structures. */
        down_write(&current->mm->mmap_sem);
        for (i = 0; i <= tlb_44x_hwater; i++) {
                kvmppc_44x_shadow_release(vcpu, i);
                vcpu->arch.shadow_tlb[i].word0 = 0;
        }
        up_write(&current->mm->mmap_sem);
}
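
A note on the permission trick in kvmppc_44x_tlb_shadow_attrib(): in TLB word2,
each user permission bit sits exactly three bits above its supervisor
counterpart, which is why a single shift grants user-mode rights matching the
guest's supervisor rights. A standalone sketch, repeating the asm/mmu-44x.h
bit values here as assumptions so it compiles on its own:

#include <assert.h>

#define PPC44x_TLB_SR 0x01
#define PPC44x_TLB_SW 0x02
#define PPC44x_TLB_SX 0x04
#define PPC44x_TLB_UR 0x08
#define PPC44x_TLB_UW 0x10
#define PPC44x_TLB_UX 0x20

int main(void)
{
        unsigned int super = PPC44x_TLB_SR | PPC44x_TLB_SW;

        /* A guest-kernel (supervisor) mapping must become a user mapping in
         * the shadow TLB, since the guest kernel runs with MSR[PR]=1. */
        assert((super << 3) == (PPC44x_TLB_UR | PPC44x_TLB_UW));
        assert((PPC44x_TLB_SX << 3) == PPC44x_TLB_UX);
        return 0;
}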
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
new file mode 100644
index 000000000000..2ccd46b6f6b7
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -0,0 +1,91 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __KVM_POWERPC_TLB_H__
#define __KVM_POWERPC_TLB_H__

#include <linux/kvm_host.h>
#include <asm/mmu-44x.h>

extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
                                unsigned int pid, unsigned int as);
extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);

/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
{
        return (tlbe->word0 >> 4) & 0xf;
}

static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
{
        return tlbe->word0 & 0xfffffc00;
}

static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
{
        unsigned int pgsize = get_tlb_size(tlbe);
        return 1 << 10 << (pgsize << 1);
}

static inline gva_t get_tlb_end(const struct tlbe *tlbe)
{
        return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
}

static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
{
        u64 word1 = tlbe->word1;
        return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
}

static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
{
        return tlbe->tid & 0xff;
}

static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
{
        return (tlbe->word0 >> 8) & 0x1;
}

static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
{
        return (tlbe->word0 >> 9) & 0x1;
}

static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmucr & 0xff;
}

static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.mmucr >> 16) & 0x1;
}

static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
{
        unsigned int pgmask = get_tlb_bytes(tlbe) - 1;

        return get_tlb_raddr(tlbe) | (eaddr & pgmask);
}

#endif /* __KVM_POWERPC_TLB_H__ */
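
get_tlb_bytes() decodes the 440 TLB SIZE field as 1KB * 4^SIZE. A
self-contained check, treating the specific encodings (4KB = 1, 16MB = 7, per
the 440 user manual) as assumptions:

#include <assert.h>

static unsigned long tlb_bytes(unsigned int size_field)
{
        /* Same decode as get_tlb_bytes(): 1KB << (2 * SIZE). */
        return 1ul << 10 << (size_field << 1);
}

int main(void)
{
        assert(tlb_bytes(1) == 4 * 1024);          /* PPC44x_TLB_4K  */
        assert(tlb_bytes(7) == 16 * 1024 * 1024);  /* PPC44x_TLB_16M */
        return 0;
}

tlb_xlate() then ORs the page offset back in, so under the 16MB identity
mapping installed by kvm_arch_vcpu_setup() in booke_guest.c, effective address
0x00123456 yields real address 0x00123456.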
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
new file mode 100644
index 000000000000..6b076010213b
--- /dev/null
+++ b/arch/powerpc/kvm/Kconfig
@@ -0,0 +1,42 @@
#
# KVM configuration
#

menuconfig VIRTUALIZATION
        bool "Virtualization"
        ---help---
          Say Y here to get to see options for using your Linux host to run
          other operating systems inside virtual machines (guests).
          This option alone does not add any kernel code.

          If you say N, all options in this submenu will be skipped and
          disabled.

if VIRTUALIZATION

config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        depends on 44x && EXPERIMENTAL
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        # We can only run on Book E hosts so far
        select KVM_BOOKE_HOST
        ---help---
          Support hosting virtualized guest machines. You will also
          need to select one or more of the processor modules below.

          This module provides access to the hardware capabilities through
          a character device node named /dev/kvm.

          If unsure, say N.

config KVM_BOOKE_HOST
        bool "KVM host support for Book E PowerPC processors"
        depends on KVM && 44x
        ---help---
          Provides host support for KVM on Book E PowerPC processors. Currently
          this works on 440 processors only.

source drivers/virtio/Kconfig

endif # VIRTUALIZATION
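
For reference, a plausible pair of .config fragments for the setup described
in kvm_440.txt. The KVM symbols are the ones defined above; the guest-side
virtio symbol names come from drivers/virtio/Kconfig and the virtio driver
Kconfigs, and should be treated as assumptions:

# Host kernel (440 board):
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_KVM_BOOKE_HOST=y

# Guest kernel, for virtio network and block IO:
CONFIG_VIRTIO=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_BLK=y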
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
new file mode 100644
index 000000000000..d0d358d367ec
--- /dev/null
+++ b/arch/powerpc/kvm/Makefile
@@ -0,0 +1,15 @@
#
# Makefile for Kernel-based Virtual Machine module
#

EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm

common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)

kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
obj-$(CONFIG_KVM) += kvm.o

AFLAGS_booke_interrupts.o := -I$(obj)

kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
new file mode 100644
index 000000000000..6d9884a6884a
--- /dev/null
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -0,0 +1,615 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",      VCPU_STAT(sum_exits) },
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "light",      VCPU_STAT(light_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { NULL }
};

static const u32 interrupt_msr_mask[16] = {
        [BOOKE_INTERRUPT_CRITICAL]      = MSR_ME,
        [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
        [BOOKE_INTERRUPT_DATA_STORAGE]  = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_INST_STORAGE]  = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_EXTERNAL]      = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_ALIGNMENT]     = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_PROGRAM]       = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_FP_UNAVAIL]    = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_SYSCALL]       = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_AP_UNAVAIL]    = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_DECREMENTER]   = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_FIT]           = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_WATCHDOG]      = MSR_ME,
        [BOOKE_INTERRUPT_DTLB_MISS]     = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_ITLB_MISS]     = MSR_CE|MSR_ME|MSR_DE,
        [BOOKE_INTERRUPT_DEBUG]         = MSR_ME,
};

const unsigned char exception_priority[] = {
        [BOOKE_INTERRUPT_DATA_STORAGE]  = 0,
        [BOOKE_INTERRUPT_INST_STORAGE]  = 1,
        [BOOKE_INTERRUPT_ALIGNMENT]     = 2,
        [BOOKE_INTERRUPT_PROGRAM]       = 3,
        [BOOKE_INTERRUPT_FP_UNAVAIL]    = 4,
        [BOOKE_INTERRUPT_SYSCALL]       = 5,
        [BOOKE_INTERRUPT_AP_UNAVAIL]    = 6,
        [BOOKE_INTERRUPT_DTLB_MISS]     = 7,
        [BOOKE_INTERRUPT_ITLB_MISS]     = 8,
        [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
        [BOOKE_INTERRUPT_DEBUG]         = 10,
        [BOOKE_INTERRUPT_CRITICAL]      = 11,
        [BOOKE_INTERRUPT_WATCHDOG]      = 12,
        [BOOKE_INTERRUPT_EXTERNAL]      = 13,
        [BOOKE_INTERRUPT_FIT]           = 14,
        [BOOKE_INTERRUPT_DECREMENTER]   = 15,
};

const unsigned char priority_exception[] = {
        BOOKE_INTERRUPT_DATA_STORAGE,
        BOOKE_INTERRUPT_INST_STORAGE,
        BOOKE_INTERRUPT_ALIGNMENT,
        BOOKE_INTERRUPT_PROGRAM,
        BOOKE_INTERRUPT_FP_UNAVAIL,
        BOOKE_INTERRUPT_SYSCALL,
        BOOKE_INTERRUPT_AP_UNAVAIL,
        BOOKE_INTERRUPT_DTLB_MISS,
        BOOKE_INTERRUPT_ITLB_MISS,
        BOOKE_INTERRUPT_MACHINE_CHECK,
        BOOKE_INTERRUPT_DEBUG,
        BOOKE_INTERRUPT_CRITICAL,
        BOOKE_INTERRUPT_WATCHDOG,
        BOOKE_INTERRUPT_EXTERNAL,
        BOOKE_INTERRUPT_FIT,
        BOOKE_INTERRUPT_DECREMENTER,
};


void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
        struct tlbe *tlbe;
        int i;

        printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
        printk("| %2s | %3s | %8s | %8s | %8s |\n",
               "nr", "tid", "word0", "word1", "word2");

        for (i = 0; i < PPC44x_TLB_SIZE; i++) {
                tlbe = &vcpu->arch.guest_tlb[i];
                if (tlbe->word0 & PPC44x_TLB_VALID)
                        printk(" G%2d | %02X | %08X | %08X | %08X |\n",
                               i, tlbe->tid, tlbe->word0, tlbe->word1,
                               tlbe->word2);
        }

        for (i = 0; i < PPC44x_TLB_SIZE; i++) {
                tlbe = &vcpu->arch.shadow_tlb[i];
                if (tlbe->word0 & PPC44x_TLB_VALID)
                        printk(" S%2d | %02X | %08X | %08X | %08X |\n",
                               i, tlbe->tid, tlbe->word0, tlbe->word1,
                               tlbe->word2);
        }
}

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr);
        printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08x %08x %08x %08x\n", i,
                       vcpu->arch.gpr[i],
                       vcpu->arch.gpr[i+1],
                       vcpu->arch.gpr[i+2],
                       vcpu->arch.gpr[i+3]);
        }
}

/* Check if we are ready to deliver the interrupt */
static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
{
        int r;

        switch (interrupt) {
        case BOOKE_INTERRUPT_CRITICAL:
                r = vcpu->arch.msr & MSR_CE;
                break;
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                r = vcpu->arch.msr & MSR_ME;
                break;
        case BOOKE_INTERRUPT_EXTERNAL:
                r = vcpu->arch.msr & MSR_EE;
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                r = vcpu->arch.msr & MSR_EE;
                break;
        case BOOKE_INTERRUPT_FIT:
                r = vcpu->arch.msr & MSR_EE;
                break;
        case BOOKE_INTERRUPT_WATCHDOG:
                r = vcpu->arch.msr & MSR_CE;
                break;
        case BOOKE_INTERRUPT_DEBUG:
                r = vcpu->arch.msr & MSR_DE;
                break;
        default:
                r = 1;
        }

        return r;
}

static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
{
        switch (interrupt) {
        case BOOKE_INTERRUPT_DECREMENTER:
                vcpu->arch.tsr |= TSR_DIS;
                break;
        }

        vcpu->arch.srr0 = vcpu->arch.pc;
        vcpu->arch.srr1 = vcpu->arch.msr;
        vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
        kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int exception;
        unsigned int priority;

        priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
        while (priority <= BOOKE_MAX_INTERRUPT) {
                exception = priority_exception[priority];
                if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
                        kvmppc_clear_exception(vcpu, exception);
                        kvmppc_deliver_interrupt(vcpu, exception);
                        break;
                }

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }
}

static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       vcpu->arch.last_inst);
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        enum emulation_result er;
        int r = RESUME_HOST;

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
        case BOOKE_INTERRUPT_DECREMENTER:
                /* Since we switched IVPR back to the host's value, the host
                 * handled this interrupt the moment we enabled interrupts.
                 * Now we just offer it a chance to reschedule the guest. */

                /* XXX At this point the TLB still holds our shadow TLB, so if
                 * we do reschedule the host will fault over it. Perhaps we
                 * should politely restore the host's entries to minimize
                 * misses before ceding control. */
                if (need_resched())
                        cond_resched();
                if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
                        vcpu->stat.dec_exits++;
                else
                        vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.msr & MSR_PR) {
                        /* Program traps generated by user-level software must
                         * be handled by the guest kernel. */
                        vcpu->arch.esr = vcpu->arch.fault_esr;
                        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
                        r = RESUME_GUEST;
                        break;
                }

                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
                        vcpu->stat.emulated_inst_exits++;
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
                        /* XXX Deliver Program interrupt to guest. */
                        printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                        /* For debugging, encode the failing instruction and
                         * report it to userspace. */
                        run->hw.hardware_exit_reason = ~0ULL << 32;
                        run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                        r = RESUME_HOST;
                        break;
                default:
                        BUG();
                }
                break;

        case BOOKE_INTERRUPT_DATA_STORAGE:
                vcpu->arch.dear = vcpu->arch.fault_dear;
                vcpu->arch.esr = vcpu->arch.fault_esr;
                kvmppc_queue_exception(vcpu, exit_nr);
                vcpu->stat.dsi_exits++;
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                vcpu->arch.esr = vcpu->arch.fault_esr;
                kvmppc_queue_exception(vcpu, exit_nr);
                vcpu->stat.isi_exits++;
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SYSCALL:
                kvmppc_queue_exception(vcpu, exit_nr);
                vcpu->stat.syscall_exits++;
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DTLB_MISS: {
                struct tlbe *gtlbe;
                unsigned long eaddr = vcpu->arch.fault_dear;
                gfn_t gfn;

                /* Check the guest TLB. */
                gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
                if (!gtlbe) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_queue_exception(vcpu, exit_nr);
                        vcpu->arch.dear = vcpu->arch.fault_dear;
                        vcpu->arch.esr = vcpu->arch.fault_esr;
                        vcpu->stat.dtlb_real_miss_exits++;
                        r = RESUME_GUEST;
                        break;
                }

                vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
                gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
                                       gtlbe->word2);
                        vcpu->stat.dtlb_virt_miss_exits++;
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        r = kvmppc_emulate_mmio(run, vcpu);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                struct tlbe *gtlbe;
                unsigned long eaddr = vcpu->arch.pc;
                gfn_t gfn;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
                if (!gtlbe) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_queue_exception(vcpu, exit_nr);
                        vcpu->stat.itlb_real_miss_exits++;
                        break;
                }

                vcpu->stat.itlb_virt_miss_exits++;

                gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
                                       gtlbe->word2);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_queue_exception(vcpu,
                                               BOOKE_INTERRUPT_MACHINE_CHECK);
                }

                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        local_irq_disable();

        kvmppc_check_and_deliver_interrupts(vcpu);

        /* Do some exit accounting. */
        vcpu->stat.sum_exits++;
        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);

                        vcpu->stat.signal_exits++;
                } else {
                        vcpu->stat.light_exits++;
                }
        } else {
                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        vcpu->stat.mmio_exits++;
                        break;
                case KVM_EXIT_DCR:
                        vcpu->stat.dcr_exits++;
                        break;
                case KVM_EXIT_INTR:
                        vcpu->stat.signal_exits++;
                        break;
                }
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];

        tlbe->tid = 0;
        tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
        tlbe->word1 = 0;
        tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;

        tlbe++;
        tlbe->tid = 0;
        tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
        tlbe->word1 = 0xef600000;
        tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
                      | PPC44x_TLB_I | PPC44x_TLB_G;

        vcpu->arch.pc = 0;
        vcpu->arch.msr = 0;
        vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */

        /* Eye-catching number so we know if the guest takes an interrupt
         * before it's programmed its own IVPR. */
        vcpu->arch.ivpr = 0x55550000;

        /* Since the guest can directly access the timebase, it must know the
         * real timebase frequency. Accordingly, it must see the state of
         * CCR1[TCS]. */
        vcpu->arch.ccr1 = mfspr(SPRN_CCR1);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = vcpu->arch.cr;
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = vcpu->arch.xer;
        regs->msr = vcpu->arch.msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.sprg0;
        regs->sprg1 = vcpu->arch.sprg1;
        regs->sprg2 = vcpu->arch.sprg2;
        regs->sprg3 = vcpu->arch.sprg3;
        regs->sprg4 = vcpu->arch.sprg4;
        regs->sprg5 = vcpu->arch.sprg5;
        regs->sprg6 = vcpu->arch.sprg6;
        regs->sprg7 = vcpu->arch.sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = vcpu->arch.gpr[i];

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        vcpu->arch.cr = regs->cr;
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        vcpu->arch.xer = regs->xer;
        vcpu->arch.msr = regs->msr;
        vcpu->arch.srr0 = regs->srr0;
        vcpu->arch.srr1 = regs->srr1;
        vcpu->arch.sprg0 = regs->sprg0;
        vcpu->arch.sprg1 = regs->sprg1;
        vcpu->arch.sprg2 = regs->sprg2;
        vcpu->arch.sprg3 = regs->sprg3;
        vcpu->arch.sprg4 = regs->sprg4;
        vcpu->arch.sprg5 = regs->sprg5;
        vcpu->arch.sprg6 = regs->sprg6;
        vcpu->arch.sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
                vcpu->arch.gpr[i] = regs->gpr[i];

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        struct tlbe *gtlbe;
        int index;
        gva_t eaddr;
        u8 pid;
        u8 as;

        eaddr = tr->linear_address;
        pid = (tr->linear_address >> 32) & 0xff;
        as = (tr->linear_address >> 40) & 0x1;

        index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
        if (index == -1) {
                tr->valid = 0;
                return 0;
        }

        gtlbe = &vcpu->arch.guest_tlb[index];

        tr->physical_address = tlb_xlate(gtlbe, eaddr);
        /* XXX what does "writeable" and "usermode" even mean? */
        tr->valid = 1;

        return 0;
}
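
The delivery order above is worth spelling out: pending_exceptions is indexed
by *priority* (via exception_priority[]), and kvmppc_check_and_deliver_interrupts()
walks set bits lowest-first, so a queued DTLB miss always beats a queued
decrementer. A toy, standalone model of that loop; the assumption that
kvmppc_queue_exception() simply sets the priority bit is ours, since its
definition lives outside this file:

#include <stdio.h>

static unsigned long pending;

static void queue(unsigned int priority) { pending |= 1ul << priority; }
static void clear(unsigned int priority) { pending &= ~(1ul << priority); }

int main(void)
{
        queue(15); /* DECREMENTER: lowest priority */
        queue(7);  /* DTLB_MISS: delivered first despite being queued later */

        while (pending) {
                /* __builtin_ctzl plays the role of find_first_bit(). */
                unsigned int priority = __builtin_ctzl(pending);
                printf("deliver priority %u\n", priority);
                clear(priority); /* kvmppc_clear_exception() */
                /* The real loop also checks MSR gating via
                 * kvmppc_can_deliver_interrupt() and stops after one. */
        }
        return 0;
}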
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
new file mode 100644
index 000000000000..b480341bc31e
--- /dev/null
+++ b/arch/powerpc/kvm/booke_host.c
@@ -0,0 +1,83 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/kvm_ppc.h>

unsigned long kvmppc_booke_handlers;

static int kvmppc_booke_init(void)
{
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR holds
         * only the upper 16 address bits, so the handlers must start on a
         * 64KB boundary; hence a 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}

module_init(kvmppc_booke_init)
module_exit(kvmppc_booke_exit)
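
Why __get_free_pages() satisfies IVPR here: the buddy allocator returns blocks
naturally aligned to their own size, so a 64KB allocation starts on a 64KB
boundary and its low 16 bits are zero, which is exactly what IVPR (latching
only the top 16 address bits) requires. A small illustration; VCPU_SIZE_ORDER
is defined elsewhere in this series, and the value 4 below (16 x 4KB pages =
64KB) is an assumption:

#define EX_PAGE_SHIFT 12      /* 4KB pages, as on 440 */
#define EX_VCPU_SIZE_ORDER 4  /* assumed; yields a 64KB block */

int main(void)
{
        unsigned long size = 1ul << (EX_PAGE_SHIFT + EX_VCPU_SIZE_ORDER);
        /* A naturally aligned 64KB block has (addr & 0xffff) == 0, so the
         * address can be written to IVPR unmodified. */
        return size == 64 * 1024 ? 0 : 1;
}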
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
new file mode 100644
index 000000000000..3b653b5309b8
--- /dev/null
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -0,0 +1,436 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)

#define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_NV_GPRS    16
#define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS))

.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
        /* Get pointer to vcpu and record exit number. */
        mtspr   SPRN_SPRG0, r4
        mfspr   r4, SPRN_SPRG1
        stw     r5, VCPU_GPR(r5)(r4)
        stw     r6, VCPU_GPR(r6)(r4)
        mfctr   r5
        lis     r6, kvmppc_resume_host@h
        stw     r5, VCPU_CTR(r4)
        li      r5, \ivor_nr
        ori     r6, r6, kvmppc_resume_host@l
        mtctr   r6
        bctr
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER BOOKE_INTERRUPT_FIT
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_DEBUG

_GLOBAL(kvmppc_handler_len)
        .long kvmppc_handler_1 - kvmppc_handler_0


/* Registers:
 *  SPRG0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        stw     r3, VCPU_GPR(r3)(r4)
        mfcr    r3
        stw     r3, VCPU_CR(r4)
        stw     r7, VCPU_GPR(r7)(r4)
        stw     r8, VCPU_GPR(r8)(r4)
        stw     r9, VCPU_GPR(r9)(r4)

        li      r6, 1
        slw     r6, r6, r5

        /* Save the faulting instruction and all GPRs for emulation. */
        andi.   r7, r6, NEED_INST_MASK
        beq     ..skip_inst_copy
        mfspr   r9, SPRN_SRR0
        mfmsr   r8
        ori     r7, r8, MSR_DS
        mtmsr   r7
        isync
        lwz     r9, 0(r9)
        mtmsr   r8
        isync
        stw     r9, VCPU_LAST_INST(r4)

        stw     r15, VCPU_GPR(r15)(r4)
        stw     r16, VCPU_GPR(r16)(r4)
        stw     r17, VCPU_GPR(r17)(r4)
        stw     r18, VCPU_GPR(r18)(r4)
        stw     r19, VCPU_GPR(r19)(r4)
        stw     r20, VCPU_GPR(r20)(r4)
        stw     r21, VCPU_GPR(r21)(r4)
        stw     r22, VCPU_GPR(r22)(r4)
        stw     r23, VCPU_GPR(r23)(r4)
        stw     r24, VCPU_GPR(r24)(r4)
        stw     r25, VCPU_GPR(r25)(r4)
        stw     r26, VCPU_GPR(r26)(r4)
        stw     r27, VCPU_GPR(r27)(r4)
        stw     r28, VCPU_GPR(r28)(r4)
        stw     r29, VCPU_GPR(r29)(r4)
        stw     r30, VCPU_GPR(r30)(r4)
        stw     r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

        /* Also grab DEAR and ESR before the host can clobber them. */

        andi.   r7, r6, NEED_DEAR_MASK
        beq     ..skip_dear
        mfspr   r9, SPRN_DEAR
        stw     r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

        andi.   r7, r6, NEED_ESR_MASK
        beq     ..skip_esr
        mfspr   r9, SPRN_ESR
        stw     r9, VCPU_FAULT_ESR(r4)
..skip_esr:

        /* Save remaining volatile guest register state to vcpu. */
        stw     r0, VCPU_GPR(r0)(r4)
        stw     r1, VCPU_GPR(r1)(r4)
        stw     r2, VCPU_GPR(r2)(r4)
        stw     r10, VCPU_GPR(r10)(r4)
        stw     r11, VCPU_GPR(r11)(r4)
        stw     r12, VCPU_GPR(r12)(r4)
        stw     r13, VCPU_GPR(r13)(r4)
        stw     r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
        mflr    r3
        stw     r3, VCPU_LR(r4)
        mfxer   r3
        stw     r3, VCPU_XER(r4)
        mfspr   r3, SPRN_SPRG0
        stw     r3, VCPU_GPR(r4)(r4)
        mfspr   r3, SPRN_SRR0
        stw     r3, VCPU_PC(r4)

        /* Restore host stack pointer and PID before IVPR, since the host
         * exception handlers use them. */
        lwz     r1, VCPU_HOST_STACK(r4)
        lwz     r3, VCPU_HOST_PID(r4)
        mtspr   SPRN_PID, r3

        /* Restore host IVPR before re-enabling interrupts. We cheat and know
         * that Linux IVPR is always 0xc0000000. */
        lis     r3, 0xc000
        mtspr   SPRN_IVPR, r3

        /* Switch to kernel stack and jump to handler. */
        LOAD_REG_ADDR(r3, kvmppc_handle_exit)
        mtctr   r3
        lwz     r3, HOST_RUN(r1)
        lwz     r2, HOST_R2(r1)
        mr      r14, r4 /* Save vcpu pointer. */

        bctrl   /* kvmppc_handle_exit() */

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr      r4, r14
        lwz     r14, VCPU_GPR(r14)(r4)

        /* Sometimes instruction emulation must restore complete GPR state. */
        andi.   r5, r3, RESUME_FLAG_NV
        beq     ..skip_nv_load
        lwz     r15, VCPU_GPR(r15)(r4)
        lwz     r16, VCPU_GPR(r16)(r4)
        lwz     r17, VCPU_GPR(r17)(r4)
        lwz     r18, VCPU_GPR(r18)(r4)
        lwz     r19, VCPU_GPR(r19)(r4)
        lwz     r20, VCPU_GPR(r20)(r4)
        lwz     r21, VCPU_GPR(r21)(r4)
        lwz     r22, VCPU_GPR(r22)(r4)
        lwz     r23, VCPU_GPR(r23)(r4)
        lwz     r24, VCPU_GPR(r24)(r4)
        lwz     r25, VCPU_GPR(r25)(r4)
        lwz     r26, VCPU_GPR(r26)(r4)
        lwz     r27, VCPU_GPR(r27)(r4)
        lwz     r28, VCPU_GPR(r28)(r4)
        lwz     r29, VCPU_GPR(r29)(r4)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

        /* Should we return to the guest? */
        andi.   r5, r3, RESUME_FLAG_HOST
        beq     lightweight_exit

        srawi   r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */

        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw     r15, VCPU_GPR(r15)(r4)
        stw     r16, VCPU_GPR(r16)(r4)
        stw     r17, VCPU_GPR(r17)(r4)
        stw     r18, VCPU_GPR(r18)(r4)
        stw     r19, VCPU_GPR(r19)(r4)
        stw     r20, VCPU_GPR(r20)(r4)
        stw     r21, VCPU_GPR(r21)(r4)
        stw     r22, VCPU_GPR(r22)(r4)
        stw     r23, VCPU_GPR(r23)(r4)
        stw     r24, VCPU_GPR(r24)(r4)
        stw     r25, VCPU_GPR(r25)(r4)
        stw     r26, VCPU_GPR(r26)(r4)
        stw     r27, VCPU_GPR(r27)(r4)
        stw     r28, VCPU_GPR(r28)(r4)
        stw     r29, VCPU_GPR(r29)(r4)
        stw     r30, VCPU_GPR(r30)(r4)
        stw     r31, VCPU_GPR(r31)(r4)

        /* Load host non-volatile register state from host stack. */
        lwz     r14, HOST_NV_GPR(r14)(r1)
        lwz     r15, HOST_NV_GPR(r15)(r1)
        lwz     r16, HOST_NV_GPR(r16)(r1)
        lwz     r17, HOST_NV_GPR(r17)(r1)
        lwz     r18, HOST_NV_GPR(r18)(r1)
        lwz     r19, HOST_NV_GPR(r19)(r1)
        lwz     r20, HOST_NV_GPR(r20)(r1)
        lwz     r21, HOST_NV_GPR(r21)(r1)
        lwz     r22, HOST_NV_GPR(r22)(r1)
        lwz     r23, HOST_NV_GPR(r23)(r1)
        lwz     r24, HOST_NV_GPR(r24)(r1)
        lwz     r25, HOST_NV_GPR(r25)(r1)
        lwz     r26, HOST_NV_GPR(r26)(r1)
        lwz     r27, HOST_NV_GPR(r27)(r1)
        lwz     r28, HOST_NV_GPR(r28)(r1)
        lwz     r29, HOST_NV_GPR(r29)(r1)
        lwz     r30, HOST_NV_GPR(r30)(r1)
        lwz     r31, HOST_NV_GPR(r31)(r1)

        /* Return to kvm_vcpu_run(). */
        lwz     r4, HOST_STACK_LR(r1)
        addi    r1, r1, HOST_STACK_SIZE
        mtlr    r4
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu    r1, -HOST_STACK_SIZE(r1)
        stw     r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        stw     r3, HOST_RUN(r1)
        mflr    r3
        stw     r3, HOST_STACK_LR(r1)

        /* Save host non-volatile register state to stack. */
        stw     r14, HOST_NV_GPR(r14)(r1)
        stw     r15, HOST_NV_GPR(r15)(r1)
        stw     r16, HOST_NV_GPR(r16)(r1)
        stw     r17, HOST_NV_GPR(r17)(r1)
        stw     r18, HOST_NV_GPR(r18)(r1)
        stw     r19, HOST_NV_GPR(r19)(r1)
        stw     r20, HOST_NV_GPR(r20)(r1)
        stw     r21, HOST_NV_GPR(r21)(r1)
298 | stw r22, HOST_NV_GPR(r22)(r1) | ||
299 | stw r23, HOST_NV_GPR(r23)(r1) | ||
300 | stw r24, HOST_NV_GPR(r24)(r1) | ||
301 | stw r25, HOST_NV_GPR(r25)(r1) | ||
302 | stw r26, HOST_NV_GPR(r26)(r1) | ||
303 | stw r27, HOST_NV_GPR(r27)(r1) | ||
304 | stw r28, HOST_NV_GPR(r28)(r1) | ||
305 | stw r29, HOST_NV_GPR(r29)(r1) | ||
306 | stw r30, HOST_NV_GPR(r30)(r1) | ||
307 | stw r31, HOST_NV_GPR(r31)(r1) | ||
308 | |||
309 | /* Load guest non-volatiles. */ | ||
310 | lwz r14, VCPU_GPR(r14)(r4) | ||
311 | lwz r15, VCPU_GPR(r15)(r4) | ||
312 | lwz r16, VCPU_GPR(r16)(r4) | ||
313 | lwz r17, VCPU_GPR(r17)(r4) | ||
314 | lwz r18, VCPU_GPR(r18)(r4) | ||
315 | lwz r19, VCPU_GPR(r19)(r4) | ||
316 | lwz r20, VCPU_GPR(r20)(r4) | ||
317 | lwz r21, VCPU_GPR(r21)(r4) | ||
318 | lwz r22, VCPU_GPR(r22)(r4) | ||
319 | lwz r23, VCPU_GPR(r23)(r4) | ||
320 | lwz r24, VCPU_GPR(r24)(r4) | ||
321 | lwz r25, VCPU_GPR(r25)(r4) | ||
322 | lwz r26, VCPU_GPR(r26)(r4) | ||
323 | lwz r27, VCPU_GPR(r27)(r4) | ||
324 | lwz r28, VCPU_GPR(r28)(r4) | ||
325 | lwz r29, VCPU_GPR(r29)(r4) | ||
326 | lwz r30, VCPU_GPR(r30)(r4) | ||
327 | lwz r31, VCPU_GPR(r31)(r4) | ||
328 | |||
329 | lightweight_exit: | ||
330 | stw r2, HOST_R2(r1) | ||
331 | |||
332 | mfspr r3, SPRN_PID | ||
333 | stw r3, VCPU_HOST_PID(r4) | ||
334 | lwz r3, VCPU_PID(r4) | ||
335 | mtspr SPRN_PID, r3 | ||
336 | |||
337 | /* Prevent all TLB updates. */ | ||
338 | mfmsr r5 | ||
339 | lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h | ||
340 | ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l | ||
341 | andc r6, r5, r6 | ||
342 | mtmsr r6 | ||
343 | |||
344 | /* Save the host's non-pinned TLB mappings, and load the guest mappings | ||
345 | * over them. Leave the host's "pinned" kernel mappings in place. */ | ||
346 | /* XXX optimization: use generation count to avoid swapping unmodified | ||
347 | * entries. */ | ||
348 | mfspr r10, SPRN_MMUCR /* Save host MMUCR. */ | ||
349 | lis r8, tlb_44x_hwater@ha | ||
350 | lwz r8, tlb_44x_hwater@l(r8) | ||
351 | addi r3, r4, VCPU_HOST_TLB - 4 | ||
352 | addi r9, r4, VCPU_SHADOW_TLB - 4 | ||
353 | li r6, 0 | ||
354 | 1: | ||
355 | /* Save host entry. */ | ||
356 | tlbre r7, r6, PPC44x_TLB_PAGEID | ||
357 | mfspr r5, SPRN_MMUCR | ||
358 | stwu r5, 4(r3) | ||
359 | stwu r7, 4(r3) | ||
360 | tlbre r7, r6, PPC44x_TLB_XLAT | ||
361 | stwu r7, 4(r3) | ||
362 | tlbre r7, r6, PPC44x_TLB_ATTRIB | ||
363 | stwu r7, 4(r3) | ||
364 | /* Load guest entry. */ | ||
365 | lwzu r7, 4(r9) | ||
366 | mtspr SPRN_MMUCR, r7 | ||
367 | lwzu r7, 4(r9) | ||
368 | tlbwe r7, r6, PPC44x_TLB_PAGEID | ||
369 | lwzu r7, 4(r9) | ||
370 | tlbwe r7, r6, PPC44x_TLB_XLAT | ||
371 | lwzu r7, 4(r9) | ||
372 | tlbwe r7, r6, PPC44x_TLB_ATTRIB | ||
373 | /* Increment index. */ | ||
374 | addi r6, r6, 1 | ||
375 | cmpw r6, r8 | ||
376 | blt 1b | ||
377 | mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */ | ||
378 | |||
379 | iccci 0, 0 /* XXX hack */ | ||
380 | |||
381 | /* Load some guest volatiles. */ | ||
382 | lwz r0, VCPU_GPR(r0)(r4) | ||
383 | lwz r2, VCPU_GPR(r2)(r4) | ||
384 | lwz r9, VCPU_GPR(r9)(r4) | ||
385 | lwz r10, VCPU_GPR(r10)(r4) | ||
386 | lwz r11, VCPU_GPR(r11)(r4) | ||
387 | lwz r12, VCPU_GPR(r12)(r4) | ||
388 | lwz r13, VCPU_GPR(r13)(r4) | ||
389 | lwz r3, VCPU_LR(r4) | ||
390 | mtlr r3 | ||
391 | lwz r3, VCPU_XER(r4) | ||
392 | mtxer r3 | ||
393 | |||
394 | /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed, | ||
395 | * so how do we make sure vcpu won't fault? */ | ||
396 | lis r8, kvmppc_booke_handlers@ha | ||
397 | lwz r8, kvmppc_booke_handlers@l(r8) | ||
398 | mtspr SPRN_IVPR, r8 | ||
399 | |||
400 | /* Save vcpu pointer for the exception handlers. */ | ||
401 | mtspr SPRN_SPRG1, r4 | ||
402 | |||
403 | /* Can't switch the stack pointer until after IVPR is switched, | ||
404 | * because host interrupt handlers would get confused. */ | ||
405 | lwz r1, VCPU_GPR(r1)(r4) | ||
406 | |||
407 | /* XXX handle USPRG0 */ | ||
408 | /* Host interrupt handlers may have clobbered these guest-readable | ||
409 | * SPRGs, so we need to reload them here with the guest's values. */ | ||
410 | lwz r3, VCPU_SPRG4(r4) | ||
411 | mtspr SPRN_SPRG4, r3 | ||
412 | lwz r3, VCPU_SPRG5(r4) | ||
413 | mtspr SPRN_SPRG5, r3 | ||
414 | lwz r3, VCPU_SPRG6(r4) | ||
415 | mtspr SPRN_SPRG6, r3 | ||
416 | lwz r3, VCPU_SPRG7(r4) | ||
417 | mtspr SPRN_SPRG7, r3 | ||
418 | |||
419 | /* Finish loading guest volatiles and jump to guest. */ | ||
420 | lwz r3, VCPU_CTR(r4) | ||
421 | mtctr r3 | ||
422 | lwz r3, VCPU_CR(r4) | ||
423 | mtcr r3 | ||
424 | lwz r5, VCPU_GPR(r5)(r4) | ||
425 | lwz r6, VCPU_GPR(r6)(r4) | ||
426 | lwz r7, VCPU_GPR(r7)(r4) | ||
427 | lwz r8, VCPU_GPR(r8)(r4) | ||
428 | lwz r3, VCPU_PC(r4) | ||
429 | mtsrr0 r3 | ||
430 | lwz r3, VCPU_MSR(r4) | ||
431 | oris r3, r3, KVMPPC_MSR_MASK@h | ||
432 | ori r3, r3, KVMPPC_MSR_MASK@l | ||
433 | mtsrr1 r3 | ||
434 | lwz r3, VCPU_GPR(r3)(r4) | ||
435 | lwz r4, VCPU_GPR(r4)(r4) | ||
436 | rfi | ||
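The TLB swap in lightweight_exit above is the core of the world switch: every non-pinned entry below tlb_44x_hwater is read out with tlbre and saved into vcpu->arch.host_tlb (along with MMUCR, which tlbre clobbers with the entry's TID), then overwritten from vcpu->arch.shadow_tlb with tlbwe. A C rendering of that loop may help; this is a sketch only, in which tlbre44x() and tlbwe44x() are hypothetical wrappers around the raw tlbre/tlbwe instructions, and the real code must stay in assembly with MSR[EE/CE/ME/DE] cleared as above:

/* Sketch of the lightweight_exit TLB swap (kernel context assumed;
 * tlbre44x()/tlbwe44x() are hypothetical instruction wrappers). */
static void kvmppc_swap_tlb(struct kvm_vcpu *vcpu)
{
	u32 host_mmucr = mfspr(SPRN_MMUCR);	/* preserved across the loop */
	int i;

	for (i = 0; i < tlb_44x_hwater; i++) {
		struct tlbe *host = &vcpu->arch.host_tlb[i];
		struct tlbe *shadow = &vcpu->arch.shadow_tlb[i];

		/* Save the host entry; tlbre deposits its TID in MMUCR. */
		host->word0 = tlbre44x(i, PPC44x_TLB_PAGEID);
		host->tid = mfspr(SPRN_MMUCR);
		host->word1 = tlbre44x(i, PPC44x_TLB_XLAT);
		host->word2 = tlbre44x(i, PPC44x_TLB_ATTRIB);

		/* Load the guest shadow entry in its place. */
		mtspr(SPRN_MMUCR, shadow->tid);
		tlbwe44x(i, PPC44x_TLB_PAGEID, shadow->word0);
		tlbwe44x(i, PPC44x_TLB_XLAT, shadow->word1);
		tlbwe44x(i, PPC44x_TLB_ATTRIB, shadow->word2);
	}

	mtspr(SPRN_MMUCR, host_mmucr);
}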
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c new file mode 100644 index 000000000000..a03fe0c80698 --- /dev/null +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -0,0 +1,760 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/jiffies.h> | ||
21 | #include <linux/timer.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | |||
26 | #include <asm/dcr.h> | ||
27 | #include <asm/dcr-regs.h> | ||
28 | #include <asm/time.h> | ||
29 | #include <asm/byteorder.h> | ||
30 | #include <asm/kvm_ppc.h> | ||
31 | |||
32 | #include "44x_tlb.h" | ||
33 | |||
34 | /* Instruction decoding */ | ||
35 | static inline unsigned int get_op(u32 inst) | ||
36 | { | ||
37 | return inst >> 26; | ||
38 | } | ||
39 | |||
40 | static inline unsigned int get_xop(u32 inst) | ||
41 | { | ||
42 | return (inst >> 1) & 0x3ff; | ||
43 | } | ||
44 | |||
45 | static inline unsigned int get_sprn(u32 inst) | ||
46 | { | ||
47 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
48 | } | ||
49 | |||
50 | static inline unsigned int get_dcrn(u32 inst) | ||
51 | { | ||
52 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
53 | } | ||
54 | |||
55 | static inline unsigned int get_rt(u32 inst) | ||
56 | { | ||
57 | return (inst >> 21) & 0x1f; | ||
58 | } | ||
59 | |||
60 | static inline unsigned int get_rs(u32 inst) | ||
61 | { | ||
62 | return (inst >> 21) & 0x1f; | ||
63 | } | ||
64 | |||
65 | static inline unsigned int get_ra(u32 inst) | ||
66 | { | ||
67 | return (inst >> 16) & 0x1f; | ||
68 | } | ||
69 | |||
70 | static inline unsigned int get_rb(u32 inst) | ||
71 | { | ||
72 | return (inst >> 11) & 0x1f; | ||
73 | } | ||
74 | |||
75 | static inline unsigned int get_rc(u32 inst) | ||
76 | { | ||
77 | return inst & 0x1; | ||
78 | } | ||
79 | |||
80 | static inline unsigned int get_ws(u32 inst) | ||
81 | { | ||
82 | return (inst >> 11) & 0x1f; | ||
83 | } | ||
84 | |||
85 | static inline unsigned int get_d(u32 inst) | ||
86 | { | ||
87 | return inst & 0xffff; | ||
88 | } | ||
89 | |||
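As a concrete check of the accessors above: mfmsr r5 assembles to 0x7ca000a6 (primary opcode 31, RT=5, extended opcode 83), and mfspr r3, SRR0 assembles to 0x7c7a02a6, where the 10-bit SPR number 26 is encoded as two swapped 5-bit halves that get_sprn() reassembles. A standalone sketch using copies of the helpers:

#include <assert.h>
#include <stdint.h>

/* Copies of the field accessors above, outside the kernel for illustration. */
static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
static unsigned int get_sprn(uint32_t inst)
{
	/* The SPR number's two 5-bit halves are swapped in the encoding. */
	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}

int main(void)
{
	uint32_t mfmsr_r5 = 0x7ca000a6;		/* mfmsr r5 */
	uint32_t mfspr_srr0 = 0x7c7a02a6;	/* mfspr r3, SRR0 (SPR 26) */

	assert(get_op(mfmsr_r5) == 31 && get_xop(mfmsr_r5) == 83);
	assert(get_rt(mfmsr_r5) == 5);

	assert(get_op(mfspr_srr0) == 31 && get_xop(mfspr_srr0) == 339);
	assert(get_sprn(mfspr_srr0) == 26);
	return 0;
}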
90 | static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
91 | const struct tlbe *tlbe) | ||
92 | { | ||
93 | gpa_t gpa; | ||
94 | |||
95 | if (!get_tlb_v(tlbe)) | ||
96 | return 0; | ||
97 | |||
98 | /* Does it match current guest AS? */ | ||
99 | /* XXX what about IS != DS? */ | ||
100 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) | ||
101 | return 0; | ||
102 | |||
103 | gpa = get_tlb_raddr(tlbe); | ||
104 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
105 | /* Mapping is not for RAM. */ | ||
106 | return 0; | ||
107 | |||
108 | return 1; | ||
109 | } | ||
110 | |||
111 | static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst) | ||
112 | { | ||
113 | u64 eaddr; | ||
114 | u64 raddr; | ||
115 | u64 asid; | ||
116 | u32 flags; | ||
117 | struct tlbe *tlbe; | ||
118 | unsigned int ra; | ||
119 | unsigned int rs; | ||
120 | unsigned int ws; | ||
121 | unsigned int index; | ||
122 | |||
123 | ra = get_ra(inst); | ||
124 | rs = get_rs(inst); | ||
125 | ws = get_ws(inst); | ||
126 | |||
127 | index = vcpu->arch.gpr[ra]; | ||
128 | if (index >= PPC44x_TLB_SIZE) { | ||
129 | printk("%s: index %d\n", __func__, index); | ||
130 | kvmppc_dump_vcpu(vcpu); | ||
131 | return EMULATE_FAIL; | ||
132 | } | ||
133 | |||
134 | tlbe = &vcpu->arch.guest_tlb[index]; | ||
135 | |||
136 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | ||
137 | if (tlbe->word0 & PPC44x_TLB_VALID) { | ||
138 | eaddr = get_tlb_eaddr(tlbe); | ||
139 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
140 | kvmppc_mmu_invalidate(vcpu, eaddr, asid); | ||
141 | } | ||
142 | |||
143 | switch (ws) { | ||
144 | case PPC44x_TLB_PAGEID: | ||
145 | tlbe->tid = vcpu->arch.mmucr & 0xff; | ||
146 | tlbe->word0 = vcpu->arch.gpr[rs]; | ||
147 | break; | ||
148 | |||
149 | case PPC44x_TLB_XLAT: | ||
150 | tlbe->word1 = vcpu->arch.gpr[rs]; | ||
151 | break; | ||
152 | |||
153 | case PPC44x_TLB_ATTRIB: | ||
154 | tlbe->word2 = vcpu->arch.gpr[rs]; | ||
155 | break; | ||
156 | |||
157 | default: | ||
158 | return EMULATE_FAIL; | ||
159 | } | ||
160 | |||
161 | if (tlbe_is_host_safe(vcpu, tlbe)) { | ||
162 | eaddr = get_tlb_eaddr(tlbe); | ||
163 | raddr = get_tlb_raddr(tlbe); | ||
164 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
165 | flags = tlbe->word2 & 0xffff; | ||
166 | |||
167 | /* Create a 4KB mapping on the host. If the guest wanted a | ||
168 | * large page, only the first 4KB is mapped here and the rest | ||
169 | * are mapped on the fly. */ | ||
170 | kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags); | ||
171 | } | ||
172 | |||
173 | return EMULATE_DONE; | ||
174 | } | ||
175 | |||
176 | static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | ||
177 | { | ||
178 | if (vcpu->arch.tcr & TCR_DIE) { | ||
179 | /* The decrementer ticks at the same rate as the timebase, so | ||
180 | * that's how we convert the guest DEC value to the number of | ||
181 | * host ticks. */ | ||
182 | unsigned long nr_jiffies; | ||
183 | |||
184 | nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy; | ||
185 | mod_timer(&vcpu->arch.dec_timer, | ||
186 | get_jiffies_64() + nr_jiffies); | ||
187 | } else { | ||
188 | del_timer(&vcpu->arch.dec_timer); | ||
189 | } | ||
190 | } | ||
191 | |||
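Worked example of the conversion above: the 440's decrementer ticks at the timebase frequency, so with a 400 MHz timebase (plausible for a 440EP board) and HZ=250, tb_ticks_per_jiffy = 400,000,000 / 250 = 1,600,000. A guest mtspr of 8,000,000 to DEC then gives nr_jiffies = 8,000,000 / 1,600,000 = 5, arming dec_timer to queue BOOKE_INTERRUPT_DECREMENTER 20 ms later. The division truncates, so a DEC value smaller than one jiffy's worth of ticks arms the timer for the current jiffy.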
192 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | ||
193 | { | ||
194 | vcpu->arch.pc = vcpu->arch.srr0; | ||
195 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | ||
196 | } | ||
197 | |||
198 | /* XXX to do: | ||
199 | * lhax | ||
200 | * lhaux | ||
201 | * lswx | ||
202 | * lswi | ||
203 | * stswx | ||
204 | * stswi | ||
205 | * lha | ||
206 | * lhau | ||
207 | * lmw | ||
208 | * stmw | ||
209 | * | ||
210 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] | ||
211 | */ | ||
212 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
213 | { | ||
214 | u32 inst = vcpu->arch.last_inst; | ||
215 | u32 ea; | ||
216 | int ra; | ||
217 | int rb; | ||
218 | int rc; | ||
219 | int rs; | ||
220 | int rt; | ||
221 | int sprn; | ||
222 | int dcrn; | ||
223 | enum emulation_result emulated = EMULATE_DONE; | ||
224 | int advance = 1; | ||
225 | |||
226 | switch (get_op(inst)) { | ||
227 | case 3: /* trap */ | ||
228 | printk("trap!\n"); | ||
229 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); | ||
230 | advance = 0; | ||
231 | break; | ||
232 | |||
233 | case 19: | ||
234 | switch (get_xop(inst)) { | ||
235 | case 50: /* rfi */ | ||
236 | kvmppc_emul_rfi(vcpu); | ||
237 | advance = 0; | ||
238 | break; | ||
239 | |||
240 | default: | ||
241 | emulated = EMULATE_FAIL; | ||
242 | break; | ||
243 | } | ||
244 | break; | ||
245 | |||
246 | case 31: | ||
247 | switch (get_xop(inst)) { | ||
248 | |||
249 | case 83: /* mfmsr */ | ||
250 | rt = get_rt(inst); | ||
251 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | ||
252 | break; | ||
253 | |||
254 | case 87: /* lbzx */ | ||
255 | rt = get_rt(inst); | ||
256 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
257 | break; | ||
258 | |||
259 | case 131: /* wrtee */ | ||
260 | rs = get_rs(inst); | ||
261 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
262 | | (vcpu->arch.gpr[rs] & MSR_EE); | ||
263 | break; | ||
264 | |||
265 | case 146: /* mtmsr */ | ||
266 | rs = get_rs(inst); | ||
267 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | ||
268 | break; | ||
269 | |||
270 | case 163: /* wrteei */ | ||
271 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
272 | | (inst & MSR_EE); | ||
273 | break; | ||
274 | |||
275 | case 215: /* stbx */ | ||
276 | rs = get_rs(inst); | ||
277 | emulated = kvmppc_handle_store(run, vcpu, | ||
278 | vcpu->arch.gpr[rs], | ||
279 | 1, 1); | ||
280 | break; | ||
281 | |||
282 | case 247: /* stbux */ | ||
283 | rs = get_rs(inst); | ||
284 | ra = get_ra(inst); | ||
285 | rb = get_rb(inst); | ||
286 | |||
287 | ea = vcpu->arch.gpr[rb]; | ||
288 | if (ra) | ||
289 | ea += vcpu->arch.gpr[ra]; | ||
290 | |||
291 | emulated = kvmppc_handle_store(run, vcpu, | ||
292 | vcpu->arch.gpr[rs], | ||
293 | 1, 1); | ||
294 | vcpu->arch.gpr[ra] = ea; | ||
295 | break; | ||
296 | |||
297 | case 279: /* lhzx */ | ||
298 | rt = get_rt(inst); | ||
299 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
300 | break; | ||
301 | |||
302 | case 311: /* lhzux */ | ||
303 | rt = get_rt(inst); | ||
304 | ra = get_ra(inst); | ||
305 | rb = get_rb(inst); | ||
306 | |||
307 | ea = vcpu->arch.gpr[rb]; | ||
308 | if (ra) | ||
309 | ea += vcpu->arch.gpr[ra]; | ||
310 | |||
311 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
312 | vcpu->arch.gpr[ra] = ea; | ||
313 | break; | ||
314 | |||
315 | case 323: /* mfdcr */ | ||
316 | dcrn = get_dcrn(inst); | ||
317 | rt = get_rt(inst); | ||
318 | |||
319 | /* The guest may access CPR0 registers to determine the timebase | ||
320 | * frequency, and it must know the real host frequency because it | ||
321 | * can directly access the timebase registers. | ||
322 | * | ||
323 | * It would be possible to emulate those accesses in userspace, | ||
324 | * but userspace can really only figure out the end frequency. | ||
325 | * We could decompose that into the factors that compute it, but | ||
326 | * that's tricky math, and it's easier to just report the real | ||
327 | * CPR0 values. | ||
328 | */ | ||
329 | switch (dcrn) { | ||
330 | case DCRN_CPR0_CONFIG_ADDR: | ||
331 | vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; | ||
332 | break; | ||
333 | case DCRN_CPR0_CONFIG_DATA: | ||
334 | local_irq_disable(); | ||
335 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | ||
336 | vcpu->arch.cpr0_cfgaddr); | ||
337 | vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); | ||
338 | local_irq_enable(); | ||
339 | break; | ||
340 | default: | ||
341 | run->dcr.dcrn = dcrn; | ||
342 | run->dcr.data = 0; | ||
343 | run->dcr.is_write = 0; | ||
344 | vcpu->arch.io_gpr = rt; | ||
345 | vcpu->arch.dcr_needed = 1; | ||
346 | emulated = EMULATE_DO_DCR; | ||
347 | } | ||
348 | |||
349 | break; | ||
350 | |||
351 | case 339: /* mfspr */ | ||
352 | sprn = get_sprn(inst); | ||
353 | rt = get_rt(inst); | ||
354 | |||
355 | switch (sprn) { | ||
356 | case SPRN_SRR0: | ||
357 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; | ||
358 | case SPRN_SRR1: | ||
359 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; | ||
360 | case SPRN_MMUCR: | ||
361 | vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; | ||
362 | case SPRN_PID: | ||
363 | vcpu->arch.gpr[rt] = vcpu->arch.pid; break; | ||
364 | case SPRN_IVPR: | ||
365 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | ||
366 | case SPRN_CCR0: | ||
367 | vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; | ||
368 | case SPRN_CCR1: | ||
369 | vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; | ||
370 | case SPRN_PVR: | ||
371 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; | ||
372 | case SPRN_DEAR: | ||
373 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | ||
374 | case SPRN_ESR: | ||
375 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | ||
376 | case SPRN_DBCR0: | ||
377 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | ||
378 | case SPRN_DBCR1: | ||
379 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | ||
380 | |||
381 | /* Note: mftb and TBRL/TBWL are user-accessible, so | ||
382 | * the guest can always access the real TB anyway. | ||
383 | * In fact, we probably will never see these traps. */ | ||
384 | case SPRN_TBWL: | ||
385 | vcpu->arch.gpr[rt] = mftbl(); break; | ||
386 | case SPRN_TBWU: | ||
387 | vcpu->arch.gpr[rt] = mftbu(); break; | ||
388 | |||
389 | case SPRN_SPRG0: | ||
390 | vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; | ||
391 | case SPRN_SPRG1: | ||
392 | vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; | ||
393 | case SPRN_SPRG2: | ||
394 | vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; | ||
395 | case SPRN_SPRG3: | ||
396 | vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; | ||
397 | /* Note: SPRG4-7 are user-readable, so we don't get | ||
398 | * a trap. */ | ||
399 | |||
400 | case SPRN_IVOR0: | ||
401 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break; | ||
402 | case SPRN_IVOR1: | ||
403 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break; | ||
404 | case SPRN_IVOR2: | ||
405 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break; | ||
406 | case SPRN_IVOR3: | ||
407 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break; | ||
408 | case SPRN_IVOR4: | ||
409 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break; | ||
410 | case SPRN_IVOR5: | ||
411 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break; | ||
412 | case SPRN_IVOR6: | ||
413 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break; | ||
414 | case SPRN_IVOR7: | ||
415 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break; | ||
416 | case SPRN_IVOR8: | ||
417 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break; | ||
418 | case SPRN_IVOR9: | ||
419 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break; | ||
420 | case SPRN_IVOR10: | ||
421 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break; | ||
422 | case SPRN_IVOR11: | ||
423 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break; | ||
424 | case SPRN_IVOR12: | ||
425 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break; | ||
426 | case SPRN_IVOR13: | ||
427 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break; | ||
428 | case SPRN_IVOR14: | ||
429 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break; | ||
430 | case SPRN_IVOR15: | ||
431 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break; | ||
432 | |||
433 | default: | ||
434 | printk("mfspr: unknown spr %x\n", sprn); | ||
435 | vcpu->arch.gpr[rt] = 0; | ||
436 | break; | ||
437 | } | ||
438 | break; | ||
439 | |||
440 | case 407: /* sthx */ | ||
441 | rs = get_rs(inst); | ||
442 | ra = get_ra(inst); | ||
443 | rb = get_rb(inst); | ||
444 | |||
445 | emulated = kvmppc_handle_store(run, vcpu, | ||
446 | vcpu->arch.gpr[rs], | ||
447 | 2, 1); | ||
448 | break; | ||
449 | |||
450 | case 439: /* sthux */ | ||
451 | rs = get_rs(inst); | ||
452 | ra = get_ra(inst); | ||
453 | rb = get_rb(inst); | ||
454 | |||
455 | ea = vcpu->arch.gpr[rb]; | ||
456 | if (ra) | ||
457 | ea += vcpu->arch.gpr[ra]; | ||
458 | |||
459 | emulated = kvmppc_handle_store(run, vcpu, | ||
460 | vcpu->arch.gpr[rs], | ||
461 | 2, 1); | ||
462 | vcpu->arch.gpr[ra] = ea; | ||
463 | break; | ||
464 | |||
465 | case 451: /* mtdcr */ | ||
466 | dcrn = get_dcrn(inst); | ||
467 | rs = get_rs(inst); | ||
468 | |||
469 | /* emulate some access in kernel */ | ||
470 | switch (dcrn) { | ||
471 | case DCRN_CPR0_CONFIG_ADDR: | ||
472 | vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; | ||
473 | break; | ||
474 | default: | ||
475 | run->dcr.dcrn = dcrn; | ||
476 | run->dcr.data = vcpu->arch.gpr[rs]; | ||
477 | run->dcr.is_write = 1; | ||
478 | vcpu->arch.dcr_needed = 1; | ||
479 | emulated = EMULATE_DO_DCR; | ||
480 | } | ||
481 | |||
482 | break; | ||
483 | |||
484 | case 467: /* mtspr */ | ||
485 | sprn = get_sprn(inst); | ||
486 | rs = get_rs(inst); | ||
487 | switch (sprn) { | ||
488 | case SPRN_SRR0: | ||
489 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; | ||
490 | case SPRN_SRR1: | ||
491 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; | ||
492 | case SPRN_MMUCR: | ||
493 | vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; | ||
494 | case SPRN_PID: | ||
495 | vcpu->arch.pid = vcpu->arch.gpr[rs]; break; | ||
496 | case SPRN_CCR0: | ||
497 | vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; | ||
498 | case SPRN_CCR1: | ||
499 | vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; | ||
500 | case SPRN_DEAR: | ||
501 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | ||
502 | case SPRN_ESR: | ||
503 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | ||
504 | case SPRN_DBCR0: | ||
505 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | ||
506 | case SPRN_DBCR1: | ||
507 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | ||
508 | |||
509 | /* XXX We need to context-switch the timebase for | ||
510 | * watchdog and FIT. */ | ||
511 | case SPRN_TBWL: break; | ||
512 | case SPRN_TBWU: break; | ||
513 | |||
514 | case SPRN_DEC: | ||
515 | vcpu->arch.dec = vcpu->arch.gpr[rs]; | ||
516 | kvmppc_emulate_dec(vcpu); | ||
517 | break; | ||
518 | |||
519 | case SPRN_TSR: | ||
520 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | ||
521 | |||
522 | case SPRN_TCR: | ||
523 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | ||
524 | kvmppc_emulate_dec(vcpu); | ||
525 | break; | ||
526 | |||
527 | case SPRN_SPRG0: | ||
528 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; | ||
529 | case SPRN_SPRG1: | ||
530 | vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; | ||
531 | case SPRN_SPRG2: | ||
532 | vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; | ||
533 | case SPRN_SPRG3: | ||
534 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; | ||
535 | |||
536 | /* Note: SPRG4-7 are user-readable. These values are | ||
537 | * loaded into the real SPRGs when resuming the | ||
538 | * guest. */ | ||
539 | case SPRN_SPRG4: | ||
540 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | ||
541 | case SPRN_SPRG5: | ||
542 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | ||
543 | case SPRN_SPRG6: | ||
544 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | ||
545 | case SPRN_SPRG7: | ||
546 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | ||
547 | |||
548 | case SPRN_IVPR: | ||
549 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break; | ||
550 | case SPRN_IVOR0: | ||
551 | vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break; | ||
552 | case SPRN_IVOR1: | ||
553 | vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break; | ||
554 | case SPRN_IVOR2: | ||
555 | vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break; | ||
556 | case SPRN_IVOR3: | ||
557 | vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break; | ||
558 | case SPRN_IVOR4: | ||
559 | vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break; | ||
560 | case SPRN_IVOR5: | ||
561 | vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break; | ||
562 | case SPRN_IVOR6: | ||
563 | vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break; | ||
564 | case SPRN_IVOR7: | ||
565 | vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break; | ||
566 | case SPRN_IVOR8: | ||
567 | vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break; | ||
568 | case SPRN_IVOR9: | ||
569 | vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break; | ||
570 | case SPRN_IVOR10: | ||
571 | vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break; | ||
572 | case SPRN_IVOR11: | ||
573 | vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break; | ||
574 | case SPRN_IVOR12: | ||
575 | vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break; | ||
576 | case SPRN_IVOR13: | ||
577 | vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break; | ||
578 | case SPRN_IVOR14: | ||
579 | vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break; | ||
580 | case SPRN_IVOR15: | ||
581 | vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break; | ||
582 | |||
583 | default: | ||
584 | printk("mtspr: unknown spr %x\n", sprn); | ||
585 | emulated = EMULATE_FAIL; | ||
586 | break; | ||
587 | } | ||
588 | break; | ||
589 | |||
590 | case 470: /* dcbi */ | ||
591 | /* Do nothing. The guest is performing dcbi because | ||
592 | * hardware DMA is not snooped by the dcache, but | ||
593 | * emulated DMA either goes through the dcache as | ||
594 | * normal writes, or the host kernel has handled dcache | ||
595 | * coherence. */ | ||
596 | break; | ||
597 | |||
598 | case 534: /* lwbrx */ | ||
599 | rt = get_rt(inst); | ||
600 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | ||
601 | break; | ||
602 | |||
603 | case 566: /* tlbsync */ | ||
604 | break; | ||
605 | |||
606 | case 662: /* stwbrx */ | ||
607 | rs = get_rs(inst); | ||
608 | ra = get_ra(inst); | ||
609 | rb = get_rb(inst); | ||
610 | |||
611 | emulated = kvmppc_handle_store(run, vcpu, | ||
612 | vcpu->arch.gpr[rs], | ||
613 | 4, 0); | ||
614 | break; | ||
615 | |||
616 | case 978: /* tlbwe */ | ||
617 | emulated = kvmppc_emul_tlbwe(vcpu, inst); | ||
618 | break; | ||
619 | |||
620 | case 914: { /* tlbsx */ | ||
621 | int index; | ||
622 | unsigned int as = get_mmucr_sts(vcpu); | ||
623 | unsigned int pid = get_mmucr_stid(vcpu); | ||
624 | |||
625 | rt = get_rt(inst); | ||
626 | ra = get_ra(inst); | ||
627 | rb = get_rb(inst); | ||
628 | rc = get_rc(inst); | ||
629 | |||
630 | ea = vcpu->arch.gpr[rb]; | ||
631 | if (ra) | ||
632 | ea += vcpu->arch.gpr[ra]; | ||
633 | |||
634 | index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | ||
635 | if (rc) { | ||
636 | if (index < 0) | ||
637 | vcpu->arch.cr &= ~0x20000000; | ||
638 | else | ||
639 | vcpu->arch.cr |= 0x20000000; | ||
640 | } | ||
641 | vcpu->arch.gpr[rt] = index; | ||
642 | |||
643 | } | ||
644 | break; | ||
645 | |||
646 | case 790: /* lhbrx */ | ||
647 | rt = get_rt(inst); | ||
648 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | ||
649 | break; | ||
650 | |||
651 | case 918: /* sthbrx */ | ||
652 | rs = get_rs(inst); | ||
653 | ra = get_ra(inst); | ||
654 | rb = get_rb(inst); | ||
655 | |||
656 | emulated = kvmppc_handle_store(run, vcpu, | ||
657 | vcpu->arch.gpr[rs], | ||
658 | 2, 0); | ||
659 | break; | ||
660 | |||
661 | case 966: /* iccci */ | ||
662 | break; | ||
663 | |||
664 | default: | ||
665 | printk("unknown: op %d xop %d\n", get_op(inst), | ||
666 | get_xop(inst)); | ||
667 | emulated = EMULATE_FAIL; | ||
668 | break; | ||
669 | } | ||
670 | break; | ||
671 | |||
672 | case 32: /* lwz */ | ||
673 | rt = get_rt(inst); | ||
674 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
675 | break; | ||
676 | |||
677 | case 33: /* lwzu */ | ||
678 | ra = get_ra(inst); | ||
679 | rt = get_rt(inst); | ||
680 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
681 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | ||
682 | break; | ||
683 | |||
684 | case 34: /* lbz */ | ||
685 | rt = get_rt(inst); | ||
686 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
687 | break; | ||
688 | |||
689 | case 35: /* lbzu */ | ||
690 | ra = get_ra(inst); | ||
691 | rt = get_rt(inst); | ||
692 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
693 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | ||
694 | break; | ||
695 | |||
696 | case 36: /* stw */ | ||
697 | rs = get_rs(inst); | ||
698 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | ||
699 | 4, 1); | ||
700 | break; | ||
701 | |||
702 | case 37: /* stwu */ | ||
703 | ra = get_ra(inst); | ||
704 | rs = get_rs(inst); | ||
705 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | ||
706 | 4, 1); | ||
707 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | ||
708 | break; | ||
709 | |||
710 | case 38: /* stb */ | ||
711 | rs = get_rs(inst); | ||
712 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | ||
713 | 1, 1); | ||
714 | break; | ||
715 | |||
716 | case 39: /* stbu */ | ||
717 | ra = get_ra(inst); | ||
718 | rs = get_rs(inst); | ||
719 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | ||
720 | 1, 1); | ||
721 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | ||
722 | break; | ||
723 | |||
724 | case 40: /* lhz */ | ||
725 | rt = get_rt(inst); | ||
726 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
727 | break; | ||
728 | |||
729 | case 41: /* lhzu */ | ||
730 | ra = get_ra(inst); | ||
731 | rt = get_rt(inst); | ||
732 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
733 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | ||
734 | break; | ||
735 | |||
736 | case 44: /* sth */ | ||
737 | rs = get_rs(inst); | ||
738 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | ||
739 | 2, 1); | ||
740 | break; | ||
741 | |||
742 | case 45: /* sthu */ | ||
743 | ra = get_ra(inst); | ||
744 | rs = get_rs(inst); | ||
745 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | ||
746 | 2, 1); | ||
747 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | ||
748 | break; | ||
749 | |||
750 | default: | ||
751 | printk("unknown op %d\n", get_op(inst)); | ||
752 | emulated = EMULATE_FAIL; | ||
753 | break; | ||
754 | } | ||
755 | |||
756 | if (advance) | ||
757 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ | ||
758 | |||
759 | return emulated; | ||
760 | } | ||
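To make the load/store dispatch concrete: a guest stw r5, 8(r6) assembles to 0x90a60008 (primary opcode 36, RS=5, RA=6, D=8). If that store touches a guest-physical region that userspace models as a device, the interrupt path saves the instruction and faulting address, kvmppc_emulate_instruction() takes case 36, and kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[5], 4, 1) fills kvm_run's mmio block for a 4-byte big-endian write and returns EMULATE_DO_MMIO, deferring the access itself to host userspace.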
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c new file mode 100644 index 000000000000..bad40bd2d3ac --- /dev/null +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -0,0 +1,436 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/errno.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <asm/cputable.h> | ||
28 | #include <asm/uaccess.h> | ||
29 | #include <asm/kvm_ppc.h> | ||
30 | |||
31 | |||
32 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
33 | { | ||
34 | return gfn; | ||
35 | } | ||
36 | |||
37 | int kvm_cpu_has_interrupt(struct kvm_vcpu *v) | ||
38 | { | ||
39 | /* XXX implement me */ | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | ||
44 | { | ||
45 | return 1; | ||
46 | } | ||
47 | |||
48 | |||
49 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
50 | { | ||
51 | enum emulation_result er; | ||
52 | int r; | ||
53 | |||
54 | er = kvmppc_emulate_instruction(run, vcpu); | ||
55 | switch (er) { | ||
56 | case EMULATE_DONE: | ||
57 | /* Future optimization: only reload non-volatiles if they were | ||
58 | * actually modified. */ | ||
59 | r = RESUME_GUEST_NV; | ||
60 | break; | ||
61 | case EMULATE_DO_MMIO: | ||
62 | run->exit_reason = KVM_EXIT_MMIO; | ||
63 | /* We must reload nonvolatiles because "update" load/store | ||
64 | * instructions modify register state. */ | ||
65 | /* Future optimization: only reload non-volatiles if they were | ||
66 | * actually modified. */ | ||
67 | r = RESUME_HOST_NV; | ||
68 | break; | ||
69 | case EMULATE_FAIL: | ||
70 | /* XXX Deliver Program interrupt to guest. */ | ||
71 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, | ||
72 | vcpu->arch.last_inst); | ||
73 | r = RESUME_HOST; | ||
74 | break; | ||
75 | default: | ||
76 | BUG(); | ||
77 | } | ||
78 | |||
79 | return r; | ||
80 | } | ||
81 | |||
82 | void kvm_arch_hardware_enable(void *garbage) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | void kvm_arch_hardware_disable(void *garbage) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | int kvm_arch_hardware_setup(void) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | void kvm_arch_hardware_unsetup(void) | ||
96 | { | ||
97 | } | ||
98 | |||
99 | void kvm_arch_check_processor_compat(void *rtn) | ||
100 | { | ||
101 | int r; | ||
102 | |||
103 | if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) | ||
104 | r = 0; | ||
105 | else | ||
106 | r = -ENOTSUPP; | ||
107 | |||
108 | *(int *)rtn = r; | ||
109 | } | ||
110 | |||
111 | struct kvm *kvm_arch_create_vm(void) | ||
112 | { | ||
113 | struct kvm *kvm; | ||
114 | |||
115 | kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); | ||
116 | if (!kvm) | ||
117 | return ERR_PTR(-ENOMEM); | ||
118 | |||
119 | return kvm; | ||
120 | } | ||
121 | |||
122 | static void kvmppc_free_vcpus(struct kvm *kvm) | ||
123 | { | ||
124 | unsigned int i; | ||
125 | |||
126 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | ||
127 | if (kvm->vcpus[i]) { | ||
128 | kvm_arch_vcpu_free(kvm->vcpus[i]); | ||
129 | kvm->vcpus[i] = NULL; | ||
130 | } | ||
131 | } | ||
132 | } | ||
133 | |||
134 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
135 | { | ||
136 | kvmppc_free_vcpus(kvm); | ||
137 | kvm_free_physmem(kvm); | ||
138 | kfree(kvm); | ||
139 | } | ||
140 | |||
141 | int kvm_dev_ioctl_check_extension(long ext) | ||
142 | { | ||
143 | int r; | ||
144 | |||
145 | switch (ext) { | ||
146 | case KVM_CAP_USER_MEMORY: | ||
147 | r = 1; | ||
148 | break; | ||
149 | default: | ||
150 | r = 0; | ||
151 | break; | ||
152 | } | ||
153 | return r; | ||
154 | |||
155 | } | ||
156 | |||
157 | long kvm_arch_dev_ioctl(struct file *filp, | ||
158 | unsigned int ioctl, unsigned long arg) | ||
159 | { | ||
160 | return -EINVAL; | ||
161 | } | ||
162 | |||
163 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
164 | struct kvm_userspace_memory_region *mem, | ||
165 | struct kvm_memory_slot old, | ||
166 | int user_alloc) | ||
167 | { | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | ||
172 | { | ||
173 | struct kvm_vcpu *vcpu; | ||
174 | int err; | ||
175 | |||
176 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
177 | if (!vcpu) { | ||
178 | err = -ENOMEM; | ||
179 | goto out; | ||
180 | } | ||
181 | |||
182 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
183 | if (err) | ||
184 | goto free_vcpu; | ||
185 | |||
186 | return vcpu; | ||
187 | |||
188 | free_vcpu: | ||
189 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
190 | out: | ||
191 | return ERR_PTR(err); | ||
192 | } | ||
193 | |||
194 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | ||
195 | { | ||
196 | kvm_vcpu_uninit(vcpu); | ||
197 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
198 | } | ||
199 | |||
200 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
201 | { | ||
202 | kvm_arch_vcpu_free(vcpu); | ||
203 | } | ||
204 | |||
205 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | ||
206 | { | ||
207 | unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER]; | ||
208 | |||
209 | return test_bit(priority, &vcpu->arch.pending_exceptions); | ||
210 | } | ||
211 | |||
212 | static void kvmppc_decrementer_func(unsigned long data) | ||
213 | { | ||
214 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | ||
215 | |||
216 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER); | ||
217 | } | ||
218 | |||
219 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | ||
220 | { | ||
221 | setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func, | ||
222 | (unsigned long)vcpu); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
228 | { | ||
229 | } | ||
230 | |||
231 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
232 | { | ||
233 | } | ||
234 | |||
235 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | ||
236 | { | ||
237 | } | ||
238 | |||
239 | void decache_vcpus_on_cpu(int cpu) | ||
240 | { | ||
241 | } | ||
242 | |||
243 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | ||
244 | struct kvm_debug_guest *dbg) | ||
245 | { | ||
246 | return -ENOTSUPP; | ||
247 | } | ||
248 | |||
249 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | ||
250 | struct kvm_run *run) | ||
251 | { | ||
252 | u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | ||
253 | *gpr = run->dcr.data; | ||
254 | } | ||
255 | |||
256 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | ||
257 | struct kvm_run *run) | ||
258 | { | ||
259 | u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | ||
260 | |||
261 | if (run->mmio.len > sizeof(*gpr)) { | ||
262 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | ||
263 | return; | ||
264 | } | ||
265 | |||
266 | if (vcpu->arch.mmio_is_bigendian) { | ||
267 | switch (run->mmio.len) { | ||
268 | case 4: *gpr = *(u32 *)run->mmio.data; break; | ||
269 | case 2: *gpr = *(u16 *)run->mmio.data; break; | ||
270 | case 1: *gpr = *(u8 *)run->mmio.data; break; | ||
271 | } | ||
272 | } else { | ||
273 | /* Convert BE data from userland back to LE. */ | ||
274 | switch (run->mmio.len) { | ||
275 | case 4: *gpr = ld_le32((u32 *)run->mmio.data); break; | ||
276 | case 2: *gpr = ld_le16((u16 *)run->mmio.data); break; | ||
277 | case 1: *gpr = *(u8 *)run->mmio.data; break; | ||
278 | } | ||
279 | } | ||
280 | } | ||
281 | |||
282 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
283 | unsigned int rt, unsigned int bytes, int is_bigendian) | ||
284 | { | ||
285 | if (bytes > sizeof(run->mmio.data)) { | ||
286 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, | ||
287 | bytes); | ||
288 | } | ||
289 | |||
290 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; | ||
291 | run->mmio.len = bytes; | ||
292 | run->mmio.is_write = 0; | ||
293 | |||
294 | vcpu->arch.io_gpr = rt; | ||
295 | vcpu->arch.mmio_is_bigendian = is_bigendian; | ||
296 | vcpu->mmio_needed = 1; | ||
297 | vcpu->mmio_is_write = 0; | ||
298 | |||
299 | return EMULATE_DO_MMIO; | ||
300 | } | ||
301 | |||
302 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
303 | u32 val, unsigned int bytes, int is_bigendian) | ||
304 | { | ||
305 | void *data = run->mmio.data; | ||
306 | |||
307 | if (bytes > sizeof(run->mmio.data)) { | ||
308 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, | ||
309 | bytes); | ||
310 | } | ||
311 | |||
312 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; | ||
313 | run->mmio.len = bytes; | ||
314 | run->mmio.is_write = 1; | ||
315 | vcpu->mmio_needed = 1; | ||
316 | vcpu->mmio_is_write = 1; | ||
317 | |||
318 | /* Store the value in the lowest bytes of 'data'. */ | ||
319 | if (is_bigendian) { | ||
320 | switch (bytes) { | ||
321 | case 4: *(u32 *)data = val; break; | ||
322 | case 2: *(u16 *)data = val; break; | ||
323 | case 1: *(u8 *)data = val; break; | ||
324 | } | ||
325 | } else { | ||
326 | /* Store LE value into 'data'. */ | ||
327 | switch (bytes) { | ||
328 | case 4: st_le32(data, val); break; | ||
329 | case 2: st_le16(data, val); break; | ||
330 | case 1: *(u8 *)data = val; break; | ||
331 | } | ||
332 | } | ||
333 | |||
334 | return EMULATE_DO_MMIO; | ||
335 | } | ||
336 | |||
337 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
338 | { | ||
339 | int r; | ||
340 | sigset_t sigsaved; | ||
341 | |||
342 | if (vcpu->sigset_active) | ||
343 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
344 | |||
345 | if (vcpu->mmio_needed) { | ||
346 | if (!vcpu->mmio_is_write) | ||
347 | kvmppc_complete_mmio_load(vcpu, run); | ||
348 | vcpu->mmio_needed = 0; | ||
349 | } else if (vcpu->arch.dcr_needed) { | ||
350 | if (!vcpu->arch.dcr_is_write) | ||
351 | kvmppc_complete_dcr_load(vcpu, run); | ||
352 | vcpu->arch.dcr_needed = 0; | ||
353 | } | ||
354 | |||
355 | kvmppc_check_and_deliver_interrupts(vcpu); | ||
356 | |||
357 | local_irq_disable(); | ||
358 | kvm_guest_enter(); | ||
359 | r = __kvmppc_vcpu_run(run, vcpu); | ||
360 | kvm_guest_exit(); | ||
361 | local_irq_enable(); | ||
362 | |||
363 | if (vcpu->sigset_active) | ||
364 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | ||
365 | |||
366 | return r; | ||
367 | } | ||
368 | |||
369 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | ||
370 | { | ||
371 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL); | ||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | ||
376 | struct kvm_mp_state *mp_state) | ||
377 | { | ||
378 | return -EINVAL; | ||
379 | } | ||
380 | |||
381 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | ||
382 | struct kvm_mp_state *mp_state) | ||
383 | { | ||
384 | return -EINVAL; | ||
385 | } | ||
386 | |||
387 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
388 | unsigned int ioctl, unsigned long arg) | ||
389 | { | ||
390 | struct kvm_vcpu *vcpu = filp->private_data; | ||
391 | void __user *argp = (void __user *)arg; | ||
392 | long r; | ||
393 | |||
394 | switch (ioctl) { | ||
395 | case KVM_INTERRUPT: { | ||
396 | struct kvm_interrupt irq; | ||
397 | r = -EFAULT; | ||
398 | if (copy_from_user(&irq, argp, sizeof(irq))) | ||
399 | goto out; | ||
400 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); | ||
401 | break; | ||
402 | } | ||
403 | default: | ||
404 | r = -EINVAL; | ||
405 | } | ||
406 | |||
407 | out: | ||
408 | return r; | ||
409 | } | ||
410 | |||
411 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | ||
412 | { | ||
413 | return -ENOTSUPP; | ||
414 | } | ||
415 | |||
416 | long kvm_arch_vm_ioctl(struct file *filp, | ||
417 | unsigned int ioctl, unsigned long arg) | ||
418 | { | ||
419 | long r; | ||
420 | |||
421 | switch (ioctl) { | ||
422 | default: | ||
423 | r = -EINVAL; | ||
424 | } | ||
425 | |||
426 | return r; | ||
427 | } | ||
428 | |||
429 | int kvm_arch_init(void *opaque) | ||
430 | { | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | void kvm_arch_exit(void) | ||
435 | { | ||
436 | } | ||
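The MMIO and DCR plumbing above implies a simple userspace contract: KVM_RUN returns with run->exit_reason set, userspace services the access (filling run->mmio.data for reads, or run->dcr.data for mfdcr), and the next KVM_RUN lands back in kvm_arch_vcpu_ioctl_run(), which calls kvmppc_complete_mmio_load()/kvmppc_complete_dcr_load() to retire the pending register write before re-entering the guest. A minimal userspace sketch, assuming vcpu_fd and the mmap'ed run structure come from the usual KVM setup, and with hypothetical handle_*() device-model callbacks:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch only: handle_mmio_read/write() and handle_dcr_read/write()
 * stand in for a real device model. */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (run->mmio.is_write)
				handle_mmio_write(run->mmio.phys_addr,
						  run->mmio.data, run->mmio.len);
			else
				/* Bytes stored here are picked up by
				 * kvmppc_complete_mmio_load() on re-entry. */
				handle_mmio_read(run->mmio.phys_addr,
						 run->mmio.data, run->mmio.len);
			break;
		case KVM_EXIT_DCR:
			if (run->dcr.is_write)
				handle_dcr_write(run->dcr.dcrn, run->dcr.data);
			else
				run->dcr.data = handle_dcr_read(run->dcr.dcrn);
			break;
		default:
			return 0;	/* let the caller decide */
		}
	}
}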
diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h index d1b530fbf8dd..f993e4198d5c 100644 --- a/include/asm-powerpc/kvm.h +++ b/include/asm-powerpc/kvm.h | |||
@@ -1,6 +1,55 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
1 | #ifndef __LINUX_KVM_POWERPC_H | 20 | #ifndef __LINUX_KVM_POWERPC_H |
2 | #define __LINUX_KVM_POWERPC_H | 21 | #define __LINUX_KVM_POWERPC_H |
3 | 22 | ||
4 | /* powerpc does not support KVM */ | 23 | #include <asm/types.h> |
24 | |||
25 | struct kvm_regs { | ||
26 | __u64 pc; | ||
27 | __u64 cr; | ||
28 | __u64 ctr; | ||
29 | __u64 lr; | ||
30 | __u64 xer; | ||
31 | __u64 msr; | ||
32 | __u64 srr0; | ||
33 | __u64 srr1; | ||
34 | __u64 pid; | ||
35 | |||
36 | __u64 sprg0; | ||
37 | __u64 sprg1; | ||
38 | __u64 sprg2; | ||
39 | __u64 sprg3; | ||
40 | __u64 sprg4; | ||
41 | __u64 sprg5; | ||
42 | __u64 sprg6; | ||
43 | __u64 sprg7; | ||
44 | |||
45 | __u64 gpr[32]; | ||
46 | }; | ||
47 | |||
48 | struct kvm_sregs { | ||
49 | }; | ||
50 | |||
51 | struct kvm_fpu { | ||
52 | __u64 fpr[32]; | ||
53 | }; | ||
5 | 54 | ||
6 | #endif | 55 | #endif /* __LINUX_KVM_POWERPC_H */ |
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h new file mode 100644 index 000000000000..2197764796d9 --- /dev/null +++ b/include/asm-powerpc/kvm_asm.h | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_ASM_H__ | ||
21 | #define __POWERPC_KVM_ASM_H__ | ||
22 | |||
23 | /* IVPR must be 64KiB-aligned. */ | ||
24 | #define VCPU_SIZE_ORDER 4 | ||
25 | #define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12) | ||
26 | #define VCPU_TLB_PGSZ PPC44x_TLB_64K | ||
27 | #define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG) | ||
28 | |||
29 | #define BOOKE_INTERRUPT_CRITICAL 0 | ||
30 | #define BOOKE_INTERRUPT_MACHINE_CHECK 1 | ||
31 | #define BOOKE_INTERRUPT_DATA_STORAGE 2 | ||
32 | #define BOOKE_INTERRUPT_INST_STORAGE 3 | ||
33 | #define BOOKE_INTERRUPT_EXTERNAL 4 | ||
34 | #define BOOKE_INTERRUPT_ALIGNMENT 5 | ||
35 | #define BOOKE_INTERRUPT_PROGRAM 6 | ||
36 | #define BOOKE_INTERRUPT_FP_UNAVAIL 7 | ||
37 | #define BOOKE_INTERRUPT_SYSCALL 8 | ||
38 | #define BOOKE_INTERRUPT_AP_UNAVAIL 9 | ||
39 | #define BOOKE_INTERRUPT_DECREMENTER 10 | ||
40 | #define BOOKE_INTERRUPT_FIT 11 | ||
41 | #define BOOKE_INTERRUPT_WATCHDOG 12 | ||
42 | #define BOOKE_INTERRUPT_DTLB_MISS 13 | ||
43 | #define BOOKE_INTERRUPT_ITLB_MISS 14 | ||
44 | #define BOOKE_INTERRUPT_DEBUG 15 | ||
45 | #define BOOKE_MAX_INTERRUPT 15 | ||
46 | |||
47 | #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ | ||
48 | #define RESUME_FLAG_HOST (1<<1) /* Resume host? */ | ||
49 | |||
50 | #define RESUME_GUEST 0 | ||
51 | #define RESUME_GUEST_NV RESUME_FLAG_NV | ||
52 | #define RESUME_HOST RESUME_FLAG_HOST | ||
53 | #define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV) | ||
54 | |||
55 | #endif /* __POWERPC_KVM_ASM_H__ */ | ||
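These RESUME values double as a packed return code: the low two bits carry the flags tested with andi. in booke_interrupts.S, and the srawi r3, r3, 2 there ("Shift -ERR back down") suggests that a handler aborting to userspace can return an errno shifted left by two with RESUME_FLAG_HOST set, which the arithmetic shift recovers as a negative value. A sketch of that convention, offered as an interpretation rather than a defined kernel API:

/* Interpretation of the packed resume-code convention (sketch only). */
#define RESUME_ERR_HOST(err)	(((err) << 2) | RESUME_FLAG_HOST)

static inline int resume_to_retval(int r)
{
	return r >> 2;	/* matches srawi r3, r3, 2 in booke_interrupts.S */
}
/* e.g. RESUME_ERR_HOST(-EINTR) unwinds through __kvmppc_vcpu_run and
 * would reach kvm_arch_vcpu_ioctl_run() as -EINTR. */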
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h new file mode 100644 index 000000000000..04ffbb8e0a35 --- /dev/null +++ b/include/asm-powerpc/kvm_host.h | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2007 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_HOST_H__ | ||
21 | #define __POWERPC_KVM_HOST_H__ | ||
22 | |||
23 | #include <linux/mutex.h> | ||
24 | #include <linux/timer.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/kvm_types.h> | ||
27 | #include <asm/kvm_asm.h> | ||
28 | |||
29 | #define KVM_MAX_VCPUS 1 | ||
30 | #define KVM_MEMORY_SLOTS 32 | ||
31 | /* memory slots that are not exposed to userspace */ | ||
32 | #define KVM_PRIVATE_MEM_SLOTS 4 | ||
33 | |||
34 | /* We don't currently support large pages. */ | ||
35 | #define KVM_PAGES_PER_HPAGE (1<<31) | ||
36 | |||
37 | struct kvm; | ||
38 | struct kvm_run; | ||
39 | struct kvm_vcpu; | ||
40 | |||
41 | struct kvm_vm_stat { | ||
42 | u32 remote_tlb_flush; | ||
43 | }; | ||
44 | |||
45 | struct kvm_vcpu_stat { | ||
46 | u32 sum_exits; | ||
47 | u32 mmio_exits; | ||
48 | u32 dcr_exits; | ||
49 | u32 signal_exits; | ||
50 | u32 light_exits; | ||
51 | /* Account for special types of light exits: */ | ||
52 | u32 itlb_real_miss_exits; | ||
53 | u32 itlb_virt_miss_exits; | ||
54 | u32 dtlb_real_miss_exits; | ||
55 | u32 dtlb_virt_miss_exits; | ||
56 | u32 syscall_exits; | ||
57 | u32 isi_exits; | ||
58 | u32 dsi_exits; | ||
59 | u32 emulated_inst_exits; | ||
60 | u32 dec_exits; | ||
61 | u32 ext_intr_exits; | ||
62 | }; | ||
63 | |||
64 | struct tlbe { | ||
65 | u32 tid; /* Only the low 8 bits are used. */ | ||
66 | u32 word0; | ||
67 | u32 word1; | ||
68 | u32 word2; | ||
69 | }; | ||
70 | |||
71 | struct kvm_arch { | ||
72 | }; | ||
73 | |||
74 | struct kvm_vcpu_arch { | ||
75 | /* Unmodified copy of the guest's TLB. */ | ||
76 | struct tlbe guest_tlb[PPC44x_TLB_SIZE]; | ||
77 | /* TLB that's actually used when the guest is running. */ | ||
78 | struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; | ||
79 | /* Pages which are referenced in the shadow TLB. */ | ||
80 | struct page *shadow_pages[PPC44x_TLB_SIZE]; | ||
81 | /* Copy of the host's TLB. */ | ||
82 | struct tlbe host_tlb[PPC44x_TLB_SIZE]; | ||
83 | |||
84 | u32 host_stack; | ||
85 | u32 host_pid; | ||
86 | |||
87 | u64 fpr[32]; | ||
88 | u32 gpr[32]; | ||
89 | |||
90 | u32 pc; | ||
91 | u32 cr; | ||
92 | u32 ctr; | ||
93 | u32 lr; | ||
94 | u32 xer; | ||
95 | |||
96 | u32 msr; | ||
97 | u32 mmucr; | ||
98 | u32 sprg0; | ||
99 | u32 sprg1; | ||
100 | u32 sprg2; | ||
101 | u32 sprg3; | ||
102 | u32 sprg4; | ||
103 | u32 sprg5; | ||
104 | u32 sprg6; | ||
105 | u32 sprg7; | ||
106 | u32 srr0; | ||
107 | u32 srr1; | ||
108 | u32 csrr0; | ||
109 | u32 csrr1; | ||
110 | u32 dsrr0; | ||
111 | u32 dsrr1; | ||
112 | u32 dear; | ||
113 | u32 esr; | ||
114 | u32 dec; | ||
115 | u32 decar; | ||
116 | u32 tbl; | ||
117 | u32 tbu; | ||
118 | u32 tcr; | ||
119 | u32 tsr; | ||
120 | u32 ivor[16]; | ||
121 | u32 ivpr; | ||
122 | u32 pir; | ||
123 | u32 pid; | ||
124 | u32 pvr; | ||
125 | u32 ccr0; | ||
126 | u32 ccr1; | ||
127 | u32 dbcr0; | ||
128 | u32 dbcr1; | ||
129 | |||
130 | u32 last_inst; | ||
131 | u32 fault_dear; | ||
132 | u32 fault_esr; | ||
133 | gpa_t paddr_accessed; | ||
134 | |||
135 | u8 io_gpr; /* GPR used as IO source/target */ | ||
136 | u8 mmio_is_bigendian; | ||
137 | u8 dcr_needed; | ||
138 | u8 dcr_is_write; | ||
139 | |||
140 | u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ | ||
141 | |||
142 | struct timer_list dec_timer; | ||
143 | unsigned long pending_exceptions; | ||
144 | }; | ||
145 | |||
146 | struct kvm_guest_debug { | ||
147 | int enabled; | ||
148 | unsigned long bp[4]; | ||
149 | int singlestep; | ||
150 | }; | ||
151 | |||
152 | #endif /* __POWERPC_KVM_HOST_H__ */ | ||
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h new file mode 100644 index 000000000000..2d48f6a63d0b --- /dev/null +++ b/include/asm-powerpc/kvm_para.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_PARA_H__ | ||
21 | #define __POWERPC_KVM_PARA_H__ | ||
22 | |||
23 | #ifdef __KERNEL__ | ||
24 | |||
25 | static inline int kvm_para_available(void) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static inline unsigned int kvm_arch_para_features(void) | ||
31 | { | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | #endif /* __KERNEL__ */ | ||
36 | |||
37 | #endif /* __POWERPC_KVM_PARA_H__ */ | ||
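Both stubs return 0, so generic code sees a host with no paravirtual interface and falls back to full emulation. A caller-side sketch (hypothetical function, not from this patch):

    #include <asm/kvm_para.h>

    /* With the stubs above this always takes the fallback path; a later
     * port could advertise features without changing any callers. */
    static int want_pv_path(void)
    {
            return kvm_para_available() && kvm_arch_para_features();
    }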
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h new file mode 100644 index 000000000000..7ac820308a7e --- /dev/null +++ b/include/asm-powerpc/kvm_ppc.h | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_PPC_H__ | ||
21 | #define __POWERPC_KVM_PPC_H__ | ||
22 | |||
23 | /* This file exists just so we can dereference kvm_vcpu, avoiding nested header | ||
24 | * dependencies. */ | ||
25 | |||
26 | #include <linux/mutex.h> | ||
27 | #include <linux/timer.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/kvm_types.h> | ||
30 | #include <linux/kvm_host.h> | ||
31 | |||
32 | struct kvm_tlb { | ||
33 | struct tlbe guest_tlb[PPC44x_TLB_SIZE]; | ||
34 | struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; | ||
35 | }; | ||
36 | |||
37 | enum emulation_result { | ||
38 | EMULATE_DONE, /* no further processing */ | ||
39 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ | ||
40 | EMULATE_DO_DCR, /* kvm_run filled with DCR request */ | ||
41 | EMULATE_FAIL, /* can't emulate this instruction */ | ||
42 | }; | ||
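The emulator's return value tells the exit handler whether the vcpu can reenter the guest directly or must first surface a request to host userspace. A dispatch sketch follows; the handler itself, the RESUME_GUEST/RESUME_HOST convention, and the KVM_EXIT_MMIO/KVM_EXIT_DCR exit reasons are assumptions here, modeled on the usual KVM exit-handling pattern:

    /* Hypothetical exit handler fragment: act on the emulator's verdict
     * for the privileged instruction that trapped. */
    static int handle_emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
    {
            int r;

            switch (kvmppc_emulate_instruction(run, vcpu)) {
            case EMULATE_DONE:
                    r = RESUME_GUEST;       /* handled in-kernel; reenter guest */
                    break;
            case EMULATE_DO_MMIO:
                    run->exit_reason = KVM_EXIT_MMIO;  /* kvm_run already filled */
                    r = RESUME_HOST;        /* userspace performs the access */
                    break;
            case EMULATE_DO_DCR:
                    run->exit_reason = KVM_EXIT_DCR;
                    r = RESUME_HOST;
                    break;
            default:                        /* EMULATE_FAIL */
                    kvmppc_dump_vcpu(vcpu);
                    r = RESUME_HOST;
                    break;
            }
            return r;
    }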
43 | |||
44 | extern const unsigned char exception_priority[]; | ||
45 | extern const unsigned char priority_exception[]; | ||
46 | |||
47 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | ||
48 | extern char kvmppc_handlers_start[]; | ||
49 | extern unsigned long kvmppc_handler_len; | ||
50 | |||
51 | extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); | ||
52 | extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
53 | unsigned int rt, unsigned int bytes, | ||
54 | int is_bigendian); | ||
55 | extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
56 | u32 val, unsigned int bytes, int is_bigendian); | ||
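kvmppc_handle_load() and kvmppc_handle_store() fill kvm_run with an MMIO request and record io_gpr/mmio_is_bigendian in the vcpu so the access can be completed once userspace replies. Modeled on how this patch's emulate.c drives the API, an emulated 32-bit big-endian load into GPR rt looks like:

    /* Inside the instruction emulator, after decoding a guest load
     * whose target address turned out to be MMIO space: */
    emulated = kvmppc_handle_load(run, vcpu, rt, 4, /* is_bigendian */ 1);
    /* EMULATE_DO_MMIO back from this call means the vcpu must exit to
     * host userspace, which performs the access and reenters KVM. */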
57 | |||
58 | extern int kvmppc_emulate_instruction(struct kvm_run *run, | ||
59 | struct kvm_vcpu *vcpu); | ||
60 | |||
61 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, | ||
62 | u64 asid, u32 flags); | ||
63 | extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid); | ||
64 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); | ||
65 | |||
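These three declarations are the shadow-TLB maintenance interface: kvmppc_mmu_map() installs a host mapping for a guest translation, kvmppc_mmu_invalidate() evicts by guest effective address and ASID, and kvmppc_mmu_priv_switch() adjusts permissions when the guest moves between its kernel and user modes. A miss-handling sketch (hypothetical wrapper; the argument derivation from the guest TLB entry is elided):

    /* Hypothetical wrapper: once a guest_tlb lookup has produced the
     * guest-physical address, ASID, and permission flags for a faulting
     * effective address, install the corresponding shadow mapping. */
    static void map_guest_translation(struct kvm_vcpu *vcpu, u64 gvaddr,
                                      u64 gpaddr, u64 asid, u32 flags)
    {
            kvmppc_mmu_map(vcpu, gvaddr, gpaddr >> PAGE_SHIFT, asid, flags);
    }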
66 | extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); | ||
67 | |||
68 | static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) | ||
69 | { | ||
70 | unsigned int priority = exception_priority[exception]; | ||
71 | set_bit(priority, &vcpu->arch.pending_exceptions); | ||
72 | } | ||
73 | |||
74 | static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) | ||
75 | { | ||
76 | unsigned int priority = exception_priority[exception]; | ||
77 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
78 | } | ||
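Note that pending_exceptions is indexed by priority rather than exception number: exception_priority[] translates one way on queueing, priority_exception[] translates back on delivery, so the highest-priority pending interrupt is simply the lowest set bit. A delivery sketch (a simplification of what kvmppc_check_and_deliver_interrupts() has to do; can_deliver() and deliver() are hypothetical, standing in for MSR[EE]-style gating and the redirect to the guest's vector):

    #include <linux/bitops.h>

    static void deliver_pending(struct kvm_vcpu *vcpu)
    {
            unsigned long *pending = &vcpu->arch.pending_exceptions;
            unsigned int priority = find_first_bit(pending, BITS_PER_LONG);

            while (priority < BITS_PER_LONG) {
                    unsigned int exception = priority_exception[priority];

                    if (can_deliver(vcpu, exception)) {
                            deliver(vcpu, exception);  /* enter guest vector */
                            clear_bit(priority, pending);
                            break;
                    }
                    priority = find_next_bit(pending, BITS_PER_LONG,
                                             priority + 1);
            }
    }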
79 | |||
80 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | ||
81 | { | ||
82 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | ||
83 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | ||
84 | |||
85 | vcpu->arch.msr = new_msr; | ||
86 | } | ||
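Routing every guest MSR write through kvmppc_set_msr() is what keeps the shadow TLB coherent: the moment MSR[PR] flips, kvmppc_mmu_priv_switch() can rework the shadow mappings for the new privilege level. In outline, matching how this patch's emulate.c handles rfi (which restores PC and MSR from SRR0/SRR1):

    /* rfi emulation, in outline: return from interrupt. */
    vcpu->arch.pc = vcpu->arch.srr0;
    kvmppc_set_msr(vcpu, vcpu->arch.srr1);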
87 | |||
88 | #endif /* __POWERPC_KVM_PPC_H__ */ | ||