-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h  |  24
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  |   3
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c   |   6
-rw-r--r--  arch/powerpc/kvm/44x.c              |  19
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c          | 256
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h          |   7
-rw-r--r--  arch/powerpc/kvm/booke.c            |  26
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S |  48
8 files changed, 168 insertions(+), 221 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
index 72e593914adb..e770ea2bbb1c 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -22,19 +22,25 @@
 
 #include <linux/kvm_host.h>
 
-/* XXX Can't include mmu-44x.h because it redefines struct mm_context. */
 #define PPC44x_TLB_SIZE 64
 
+/* If the guest is expecting it, this can be as large as we like; we'd just
+ * need to find some way of advertising it. */
+#define KVM44x_GUEST_TLB_SIZE 64
+
+struct kvmppc_44x_shadow_ref {
+	struct page *page;
+	u16 gtlb_index;
+	u8 writeable;
+	u8 tid;
+};
+
 struct kvmppc_vcpu_44x {
 	/* Unmodified copy of the guest's TLB. */
-	struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
-	/* TLB that's actually used when the guest is running. */
-	struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
-	/* Pages which are referenced in the shadow TLB. */
-	struct page *shadow_pages[PPC44x_TLB_SIZE];
-
-	/* Track which TLB entries we've modified in the current exit. */
-	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+	struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
+
+	/* References to guest pages in the hardware TLB. */
+	struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
 
 	struct kvm_vcpu vcpu;
 };
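Note on the new bookkeeping (annotation, not part of the patch): shadow_refs[] is indexed by hardware TLB slot, not by guest TLB entry, so a single guest entry can back several hardware slots at once (e.g. a 16M guest mapping demand-faulted in as many small host-sized entries). ref->page doubles as the "slot in use" flag, and gtlb_index starts at -1, which in a u16 reads back as 0xffff and can never match a valid guest index. A minimal sketch of a query this layout supports; the helper name is illustrative only:

static unsigned int count_shadow_slots(struct kvmppc_vcpu_44x *vcpu_44x,
                                       unsigned int gtlb_index)
{
	unsigned int count = 0;
	int i;

	/* Walk every hardware TLB slot we track. */
	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		/* ref->page marks the slot as live; gtlb_index records which
		 * guest entry the slot shadows. */
		if (ref->page && ref->gtlb_index == gtlb_index)
			count++;
	}

	return count;
}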
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 5bb29267d6a6..36d2a50a8487 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -53,7 +53,8 @@ extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
-                           u64 asid, u32 flags, u32 max_bytes);
+                           u64 asid, u32 flags, u32 max_bytes,
+                           unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 393c7f36a1e8..ba39526d3201 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -359,12 +359,6 @@ int main(void)
 #ifdef CONFIG_KVM
 	DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
 
-	DEFINE(VCPU_TO_44X, offsetof(struct kvmppc_vcpu_44x, vcpu));
-	DEFINE(VCPU44x_SHADOW_TLB,
-	       offsetof(struct kvmppc_vcpu_44x, shadow_tlb));
-	DEFINE(VCPU44x_SHADOW_MOD,
-	       offsetof(struct kvmppc_vcpu_44x, shadow_tlb_mod));
-
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 22054b164b5a..05d72fc8b478 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -96,21 +96,14 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	int i;
-
-	/* Mark every guest entry in the shadow TLB entry modified, so that they
-	 * will all be reloaded on the next vcpu run (instead of being
-	 * demand-faulted). */
-	for (i = 0; i <= tlb_44x_hwater; i++)
-		kvmppc_tlbe_set_modified(vcpu, i);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	/* Don't leave guest TLB entries resident when being de-scheduled. */
-	/* XXX It would be nice to differentiate between heavyweight exit and
-	 * sched_out here, since we could avoid the TLB flush for heavyweight
-	 * exits. */
+	/* XXX Since every guest uses TS=1 TID=0/1 mappings, we can't leave any TLB
+	 * entries around when we're descheduled, so we must completely flush the
+	 * TLB of all guest mappings. On the other hand, if there is only one
+	 * guest, this flush is completely unnecessary. */
 	_tlbia();
 }
 
@@ -130,6 +123,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
+	int i;
 
 	tlbe->tid = 0;
 	tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
@@ -148,6 +142,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	 * CCR1[TCS]. */
 	vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
 
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
+		vcpu_44x->shadow_refs[i].gtlb_index = -1;
+
 	return 0;
 }
 
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index d49dc66ab3c3..2981ebea3d1f 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -22,6 +22,8 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
+
+#include <asm/tlbflush.h>
 #include <asm/mmu-44x.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_44x.h>
@@ -40,8 +42,6 @@
 #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
 #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
 
-static unsigned int kvmppc_tlb_44x_pos;
-
 #ifdef DEBUG
 void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 {
@@ -52,24 +52,49 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 	printk("| %2s | %3s | %8s | %8s | %8s |\n",
 			"nr", "tid", "word0", "word1", "word2");
 
-	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
 		tlbe = &vcpu_44x->guest_tlb[i];
 		if (tlbe->word0 & PPC44x_TLB_VALID)
 			printk(" G%2d | %02X | %08X | %08X | %08X |\n",
 			       i, tlbe->tid, tlbe->word0, tlbe->word1,
 			       tlbe->word2);
 	}
-
-	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-		tlbe = &vcpu_44x->shadow_tlb[i];
-		if (tlbe->word0 & PPC44x_TLB_VALID)
-			printk(" S%2d | %02X | %08X | %08X | %08X |\n",
-			       i, tlbe->tid, tlbe->word0, tlbe->word1,
-			       tlbe->word2);
-	}
 }
 #endif
 
+static inline void kvmppc_44x_tlbie(unsigned int index)
+{
+	/* 0 <= index < 64, so the V bit is clear and we can use the index as
+	 * word0. */
+	asm volatile(
+		"tlbwe %[index], %[index], 0\n"
+	:
+	: [index] "r"(index)
+	);
+}
+
+static inline void kvmppc_44x_tlbwe(unsigned int index,
+                                    struct kvmppc_44x_tlbe *stlbe)
+{
+	unsigned long tmp;
+
+	asm volatile(
+		"mfspr %[tmp], %[sprn_mmucr]\n"
+		"rlwimi %[tmp], %[tid], 0, 0xff\n"
+		"mtspr %[sprn_mmucr], %[tmp]\n"
+		"tlbwe %[word0], %[index], 0\n"
+		"tlbwe %[word1], %[index], 1\n"
+		"tlbwe %[word2], %[index], 2\n"
+	: [tmp]   "=&r"(tmp)
+	: [word0] "r"(stlbe->word0),
+	  [word1] "r"(stlbe->word1),
+	  [word2] "r"(stlbe->word2),
+	  [tid]   "r"(stlbe->tid),
+	  [index] "r"(index),
+	  [sprn_mmucr] "i"(SPRN_MMUCR)
+	);
+}
+
 static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 {
 	/* We only care about the guest's permission and user bits. */
@@ -99,7 +124,7 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 	int i;
 
 	/* XXX Replace loop with fancy data structures. */
-	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
 		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
 		unsigned int tid;
 
@@ -125,65 +150,53 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 	return -1;
 }
 
-struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
-                                               gva_t eaddr)
+int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
-	unsigned int index;
 
-	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
-	if (index == -1)
-		return NULL;
-	return &vcpu_44x->guest_tlb[index];
+	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
-struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
-                                               gva_t eaddr)
+int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
-	unsigned int index;
 
-	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
-	if (index == -1)
-		return NULL;
-	return &vcpu_44x->guest_tlb[index];
+	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
-static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
+static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
+                                      unsigned int stlb_index)
 {
-	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
-}
+	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
 
-static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
-                                      unsigned int index)
-{
-	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
-	struct page *page = vcpu_44x->shadow_pages[index];
+	if (!ref->page)
+		return;
 
-	if (get_tlb_v(stlbe)) {
-		if (kvmppc_44x_tlbe_is_writable(stlbe))
-			kvm_release_page_dirty(page);
-		else
-			kvm_release_page_clean(page);
-	}
-}
+	/* Discard from the TLB. */
+	/* Note: we could actually invalidate a host mapping, if the host overwrote
+	 * this TLB entry since we inserted a guest mapping. */
+	kvmppc_44x_tlbie(stlb_index);
 
-void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
-{
-	int i;
+	/* Now release the page. */
+	if (ref->writeable)
+		kvm_release_page_dirty(ref->page);
+	else
+		kvm_release_page_clean(ref->page);
 
-	for (i = 0; i <= tlb_44x_hwater; i++)
-		kvmppc_44x_shadow_release(vcpu, i);
+	ref->page = NULL;
+
+	/* XXX set tlb_44x_index to stlb_index? */
+
+	KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
 }
 
-void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	int i;
 
-	vcpu_44x->shadow_tlb_mod[i] = 1;
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_44x_shadow_release(vcpu_44x, i);
 }
 
 /**
@@ -199,21 +212,24 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
  * the shadow TLB.
  */
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
-                    u32 flags, u32 max_bytes)
+                    u32 flags, u32 max_bytes, unsigned int gtlb_index)
 {
+	struct kvmppc_44x_tlbe stlbe;
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	struct kvmppc_44x_shadow_ref *ref;
 	struct page *new_page;
-	struct kvmppc_44x_tlbe *stlbe;
 	hpa_t hpaddr;
 	gfn_t gfn;
 	unsigned int victim;
 
-	/* Future optimization: don't overwrite the TLB entry containing the
-	 * current PC (or stack?). */
-	victim = kvmppc_tlb_44x_pos++;
-	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
-		kvmppc_tlb_44x_pos = 0;
-	stlbe = &vcpu_44x->shadow_tlb[victim];
+	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
+	 * miss handler by disabling interrupts. */
+	local_irq_disable();
+	victim = ++tlb_44x_index;
+	if (victim > tlb_44x_hwater)
+		victim = 0;
+	tlb_44x_index = victim;
+	local_irq_enable();
 
 	/* Get reference to new page. */
 	gfn = gpaddr >> PAGE_SHIFT;
@@ -225,10 +241,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 	}
 	hpaddr = page_to_phys(new_page);
 
-	/* Drop reference to old page. */
-	kvmppc_44x_shadow_release(vcpu, victim);
-
-	vcpu_44x->shadow_pages[victim] = new_page;
+	/* Invalidate any previous shadow mappings. */
+	kvmppc_44x_shadow_release(vcpu_44x, victim);
 
 	/* XXX Make sure (va, size) doesn't overlap any other
 	 * entries. 440x6 user manual says the result would be
@@ -236,21 +250,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 
 	/* XXX what about AS? */
 
-	stlbe->tid = !(asid & 0xff);
-
 	/* Force TS=1 for all guest mappings. */
-	stlbe->word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
+	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
 
 	if (max_bytes >= PAGE_SIZE) {
 		/* Guest mapping is larger than or equal to host page size. We can use
 		 * a "native" host mapping. */
-		stlbe->word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
+		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
 	} else {
 		/* Guest mapping is smaller than host page size. We must restrict the
 		 * size of the mapping to be at most the smaller of the two, but for
 		 * simplicity we fall back to a 4K mapping (this is probably what the
 		 * guest is using anyways). */
-		stlbe->word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
+		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
 
 		/* 'hpaddr' is a host page, which is larger than the mapping we're
 		 * inserting here. To compensate, we must add the in-page offset to the
@@ -258,47 +270,36 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
 	}
 
-	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
-	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-	                                            vcpu->arch.msr & MSR_PR);
-	kvmppc_tlbe_set_modified(vcpu, victim);
+	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+	                                           vcpu->arch.msr & MSR_PR);
+	stlbe.tid = !(asid & 0xff);
 
-	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
-	            stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
-	            handler);
+	/* Keep track of the reference so we can properly release it later. */
+	ref = &vcpu_44x->shadow_refs[victim];
+	ref->page = new_page;
+	ref->gtlb_index = gtlb_index;
+	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
+	ref->tid = stlbe.tid;
+
+	/* Insert shadow mapping into hardware TLB. */
+	kvmppc_44x_tlbwe(victim, &stlbe);
+	KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
+	            stlbe.word2, handler);
 }
 
-static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                  gva_t eend, u32 asid)
+/* For a particular guest TLB entry, invalidate the corresponding host TLB
+ * mappings and release the host pages. */
+static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
+                                  unsigned int gtlb_index)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	unsigned int pid = !(asid & 0xff);
 	int i;
 
-	/* XXX Replace loop with fancy data structures. */
-	for (i = 0; i <= tlb_44x_hwater; i++) {
-		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
-		unsigned int tid;
-
-		if (!get_tlb_v(stlbe))
-			continue;
-
-		if (eend < get_tlb_eaddr(stlbe))
-			continue;
-
-		if (eaddr > get_tlb_end(stlbe))
-			continue;
-
-		tid = get_tlb_tid(stlbe);
-		if (tid && (tid != pid))
-			continue;
-
-		kvmppc_44x_shadow_release(vcpu, i);
-		stlbe->word0 = 0;
-		kvmppc_tlbe_set_modified(vcpu, i);
-		KVMTRACE_5D(STLB_INVAL, vcpu, i,
-		            stlbe->tid, stlbe->word0, stlbe->word1,
-		            stlbe->word2, handler);
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
+		if (ref->gtlb_index == gtlb_index)
+			kvmppc_44x_shadow_release(vcpu_44x, i);
 	}
 }
 
@@ -321,14 +322,11 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
 	 * can't access guest kernel mappings (TID=1). When we switch to a new
 	 * guest PID, which will also use host PID=0, we must discard the old guest
 	 * userspace mappings. */
-	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_tlb); i++) {
-		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
 
-		if (get_tlb_tid(stlbe) == 0) {
-			kvmppc_44x_shadow_release(vcpu, i);
-			stlbe->word0 = 0;
-			kvmppc_tlbe_set_modified(vcpu, i);
-		}
+		if (ref->tid == 0)
+			kvmppc_44x_shadow_release(vcpu_44x, i);
 	}
 }
 
@@ -356,26 +354,21 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	gva_t eaddr;
-	u64 asid;
 	struct kvmppc_44x_tlbe *tlbe;
-	unsigned int index;
+	unsigned int gtlb_index;
 
-	index = vcpu->arch.gpr[ra];
-	if (index > PPC44x_TLB_SIZE) {
-		printk("%s: index %d\n", __func__, index);
+	gtlb_index = vcpu->arch.gpr[ra];
+	if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+		printk("%s: index %d\n", __func__, gtlb_index);
 		kvmppc_dump_vcpu(vcpu);
 		return EMULATE_FAIL;
 	}
 
-	tlbe = &vcpu_44x->guest_tlb[index];
+	tlbe = &vcpu_44x->guest_tlb[gtlb_index];
 
-	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
-	if (tlbe->word0 & PPC44x_TLB_VALID) {
-		eaddr = get_tlb_eaddr(tlbe);
-		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
-		kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
-	}
+	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
+	if (tlbe->word0 & PPC44x_TLB_VALID)
+		kvmppc_44x_invalidate(vcpu, gtlb_index);
 
 	switch (ws) {
 	case PPC44x_TLB_PAGEID:
@@ -396,6 +389,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	}
 
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
+		u64 asid;
+		gva_t eaddr;
 		gpa_t gpaddr;
 		u32 flags;
 		u32 bytes;
@@ -411,12 +406,11 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
 		flags = tlbe->word2 & 0xffff;
 
-		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes);
+		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
 	}
 
-	KVMTRACE_5D(GTLB_WRITE, vcpu, index,
-	            tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
-	            handler);
+	KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
+	            tlbe->word1, tlbe->word2, handler);
 
 	return EMULATE_DONE;
 }
@@ -424,7 +418,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 {
 	u32 ea;
-	int index;
+	int gtlb_index;
 	unsigned int as = get_mmucr_sts(vcpu);
 	unsigned int pid = get_mmucr_stid(vcpu);
 
@@ -432,14 +426,14 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 	if (ra)
 		ea += vcpu->arch.gpr[ra];
 
-	index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
 	if (rc) {
-		if (index < 0)
+		if (gtlb_index < 0)
 			vcpu->arch.cr &= ~0x20000000;
 		else
 			vcpu->arch.cr |= 0x20000000;
 	}
-	vcpu->arch.gpr[rt] = index;
+	vcpu->arch.gpr[rt] = gtlb_index;
 
 	return EMULATE_DONE;
 }
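Note (annotation, not part of the patch): the heart of the change is in kvmppc_mmu_map() above. Shadow entries are no longer staged in a per-vcpu array and replayed on guest entry; they are written straight into the hardware TLB with kvmppc_44x_tlbwe(), and eviction piggybacks on the host's round-robin replacement state. Pulled out on its own, assuming tlb_44x_index and tlb_44x_hwater are the host's replacement counter and high-water mark (slots above the high-water mark hold the host's pinned kernel mappings and are never clobbered), the victim selection is just this; the helper name is illustrative only:

static unsigned int kvm44x_pick_victim(void)
{
	unsigned int victim;

	/* The host TLB miss handlers advance the same counter, so close
	 * the race by disabling interrupts around the update. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();

	return victim;
}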
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index b1029af3de20..772191f29e62 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -25,11 +25,8 @@
 
 extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
                                 unsigned int pid, unsigned int as);
-extern struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
-                                                      gva_t eaddr);
-extern struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
-                                                      gva_t eaddr);
-extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
+extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
 
 extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
                                  u8 rc);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 924c7b4b1107..eb24383c87d2 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -24,10 +24,12 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/cacheflush.h>
+#include <asm/kvm_44x.h>
 
 #include "booke.h"
 #include "44x_tlb.h"
@@ -207,10 +209,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 * handled this interrupt the moment we enabled interrupts.
 		 * Now we just offer it a chance to reschedule the guest. */
 
-		/* XXX At this point the TLB still holds our shadow TLB, so if
-		 * we do reschedule the host will fault over it. Perhaps we
-		 * should politely restore the host's entries to minimize
-		 * misses before ceding control. */
 		vcpu->stat.dec_exits++;
 		if (need_resched())
 			cond_resched();
@@ -281,14 +279,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
+	/* XXX move to a 440-specific file. */
 	case BOOKE_INTERRUPT_DTLB_MISS: {
+		struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 		struct kvmppc_44x_tlbe *gtlbe;
 		unsigned long eaddr = vcpu->arch.fault_dear;
+		int gtlb_index;
 		gfn_t gfn;
 
 		/* Check the guest TLB. */
-		gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
-		if (!gtlbe) {
+		gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr);
+		if (gtlb_index < 0) {
 			/* The guest didn't have a mapping for it. */
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
 			vcpu->arch.dear = vcpu->arch.fault_dear;
@@ -298,6 +299,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 
+		gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
 		vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
 		gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
 
@@ -309,7 +311,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * Either way, we need to satisfy the fault without
 			 * invoking the guest. */
 			kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid,
-			               gtlbe->word2, get_tlb_bytes(gtlbe));
+			               gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
 			vcpu->stat.dtlb_virt_miss_exits++;
 			r = RESUME_GUEST;
 		} else {
@@ -322,17 +324,20 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 	}
 
+	/* XXX move to a 440-specific file. */
 	case BOOKE_INTERRUPT_ITLB_MISS: {
+		struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 		struct kvmppc_44x_tlbe *gtlbe;
 		unsigned long eaddr = vcpu->arch.pc;
 		gpa_t gpaddr;
 		gfn_t gfn;
+		int gtlb_index;
 
 		r = RESUME_GUEST;
 
 		/* Check the guest TLB. */
-		gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
-		if (!gtlbe) {
+		gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr);
+		if (gtlb_index < 0) {
 			/* The guest didn't have a mapping for it. */
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
 			vcpu->stat.itlb_real_miss_exits++;
@@ -341,6 +346,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		vcpu->stat.itlb_virt_miss_exits++;
 
+		gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
 		gpaddr = tlb_xlate(gtlbe, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -352,7 +358,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * Either way, we need to satisfy the fault without
 			 * invoking the guest. */
 			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
-			               gtlbe->word2, get_tlb_bytes(gtlbe));
+			               gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
 		} else {
 			/* Guest mapped and leaped at non-RAM! */
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
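Note (annotation, not part of the patch): both miss handlers above now share the same shape — translate the fault address to a guest TLB index, reflect the miss into the guest if there is no translation, otherwise map on the guest's behalf. A condensed sketch of the common path, using only identifiers that appear in the patch (the real handlers also distinguish RAM-backed from MMIO translations before mapping):

	int gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr);

	if (gtlb_index < 0) {
		/* No guest translation: let the guest handle the miss. */
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
	} else {
		struct kvmppc_44x_tlbe *gtlbe = &to_44x(vcpu)->guest_tlb[gtlb_index];

		/* RAM-backed: fill the hardware TLB directly, recording which
		 * guest entry this shadow mapping came from. */
		kvmppc_mmu_map(vcpu, eaddr, tlb_xlate(gtlbe, eaddr), gtlbe->tid,
		               gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
	}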
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8d6929b7fdb6..eb2186823e4e 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -335,54 +335,6 @@ lightweight_exit:
 	lwz	r3, VCPU_SHADOW_PID(r4)
 	mtspr	SPRN_PID, r3
 
-	/* Prevent all asynchronous TLB updates. */
-	mfmsr	r5
-	lis	r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
-	ori	r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6, r5, r6
-	mtmsr	r6
-
-	/* Load the guest mappings, leaving the host's "pinned" kernel mappings
-	 * in place. */
-	mfspr	r10, SPRN_MMUCR			/* Save host MMUCR. */
-	li	r5, PPC44x_TLB_SIZE
-	lis	r5, tlb_44x_hwater@ha
-	lwz	r5, tlb_44x_hwater@l(r5)
-	mtctr	r5
-	addi	r9, r4, -VCPU_TO_44X + VCPU44x_SHADOW_TLB
-	addi	r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD
-	li	r3, 0
-1:
-	lbzx	r7, r3, r5
-	cmpwi	r7, 0
-	beq	3f
-
-	/* Load guest entry. */
-	mulli	r11, r3, TLBE_BYTES
-	add	r11, r11, r9
-	lwz	r7, 0(r11)
-	mtspr	SPRN_MMUCR, r7
-	lwz	r7, 4(r11)
-	tlbwe	r7, r3, PPC44x_TLB_PAGEID
-	lwz	r7, 8(r11)
-	tlbwe	r7, r3, PPC44x_TLB_XLAT
-	lwz	r7, 12(r11)
-	tlbwe	r7, r3, PPC44x_TLB_ATTRIB
-3:
-	addi	r3, r3, 1			/* Increment index. */
-	bdnz	1b
-
-	mtspr	SPRN_MMUCR, r10			/* Restore host MMUCR. */
-
-	/* Clear bitmap of modified TLB entries */
-	li	r5, PPC44x_TLB_SIZE>>2
-	mtctr	r5
-	addi	r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD - 4
-	li	r6, 0
-1:
-	stwu	r6, 4(r5)
-	bdnz	1b
-
 	iccci	0, 0 /* XXX hack */
 
 	/* Load some guest volatiles. */
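Note (annotation, not part of the patch): the deleted block above ran on every lightweight exit with asynchronous interrupts masked. It walked the shadow_tlb_mod bitmap and rewrote each dirty shadow entry into the hardware TLB, then cleared the bitmap. In rough C, assuming the old per-vcpu arrays and treating tlbwe as an intrinsic, it did approximately the following; with shadow entries now written directly into the hardware TLB at fault time, nothing needs to be replayed on the way back into the guest:

	/* Approximate C rendering of the removed assembly (illustrative only). */
	for (i = 0; i < tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (!vcpu_44x->shadow_tlb_mod[i])
			continue;

		/* The first word of the entry holds the TID for MMUCR. */
		mtspr(SPRN_MMUCR, stlbe->tid);
		tlbwe(stlbe->word0, i, PPC44x_TLB_PAGEID);
		tlbwe(stlbe->word1, i, PPC44x_TLB_XLAT);
		tlbwe(stlbe->word2, i, PPC44x_TLB_ATTRIB);
	}
	memset(vcpu_44x->shadow_tlb_mod, 0, PPC44x_TLB_SIZE);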