 Documentation/virtual/kvm/api.txt   |  74
 arch/powerpc/include/asm/kvm.h      |  35
 arch/powerpc/include/asm/kvm_e500.h |  24
 arch/powerpc/include/asm/kvm_ppc.h  |   5
 arch/powerpc/kvm/e500.c             |   5
 arch/powerpc/kvm/e500_emulate.c     |  12
 arch/powerpc/kvm/e500_tlb.c         | 393
 arch/powerpc/kvm/e500_tlb.h         |  38
 arch/powerpc/kvm/powerpc.c          |  28
 include/linux/kvm.h                 |  18
 10 files changed, 469 insertions(+), 163 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7ca696227d3..bcd45d5afca 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1409,6 +1409,38 @@ The following flags are defined:
 If datamatch flag is set, the event will be signaled only if the written value
 to the registered address is equal to datamatch in struct kvm_ioeventfd.
 
+4.59 KVM_DIRTY_TLB
+
+Capability: KVM_CAP_SW_TLB
+Architectures: ppc
+Type: vcpu ioctl
+Parameters: struct kvm_dirty_tlb (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_dirty_tlb {
+	__u64 bitmap;
+	__u32 num_dirty;
+};
+
+This must be called whenever userspace has changed an entry in the shared
+TLB, prior to calling KVM_RUN on the associated vcpu.
+
+The "bitmap" field is the userspace address of an array.  This array
+consists of a number of bits, equal to the total number of TLB entries as
+determined by the last successful configuration via KVM_CAP_SW_TLB, rounded
+up to the nearest multiple of 64.
+
+Each bit corresponds to one TLB entry, ordered the same as in the shared TLB
+array.
+
+The array is little-endian: bit 0 is the least significant bit of the
+first byte, bit 8 is the least significant bit of the second byte, etc.
+This avoids any complications with differing word sizes.
+
+The "num_dirty" field is a performance hint for KVM to determine whether it
+should skip processing the bitmap and just invalidate everything.  It must
+be set to the number of set bits in the bitmap.
+
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
@@ -1842,3 +1874,45 @@ HTAB address part of SDR1 contains an HVA instead of a GPA, as PAPR keeps the
 HTAB invisible to the guest.
 
 When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
+
+6.3 KVM_CAP_SW_TLB
+
+Architectures: ppc
+Parameters: args[0] is the address of a struct kvm_config_tlb
+Returns: 0 on success; -1 on error
+
+struct kvm_config_tlb {
+	__u64 params;
+	__u64 array;
+	__u32 mmu_type;
+	__u32 array_len;
+};
+
+Configures the virtual CPU's TLB array, establishing a shared memory area
+between userspace and KVM.  The "params" and "array" fields are userspace
+addresses of mmu-type-specific data structures.  The "array_len" field is a
+safety mechanism, and should be set to the size in bytes of the memory that
+userspace has reserved for the array.  It must be at least the size dictated
+by "mmu_type" and "params".
+
+While KVM_RUN is active, the shared region is under control of KVM.  Its
+contents are undefined, and any modification by userspace results in
+boundedly undefined behavior.
+
+On return from KVM_RUN, the shared region will reflect the current state of
+the guest's TLB.  If userspace makes any changes, it must call KVM_DIRTY_TLB
+to tell KVM which entries have been changed, prior to calling KVM_RUN again
+on this vcpu.
+
+For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+ - The "params" field is of type "struct kvm_book3e_206_tlb_params".
+ - The "array" field points to an array of type "struct
+   kvm_book3e_206_tlb_entry".
+ - The array consists of all entries in the first TLB, followed by all
+   entries in the second TLB.
+ - Within a TLB, entries are ordered first by increasing set number.  Within a
+   set, entries are ordered by way (increasing ESEL).
+ - The hash for determining set number in TLB0 is: (MAS2 >> 12) & (num_sets - 1)
+   where "num_sets" is the tlb_sizes[] value divided by the tlb_ways[] value.
+ - The tsize field of mas1 shall be set to 4K on TLB0, even though the
+   hardware ignores this value for TLB0.
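
[Editor's note: the following example is an annotation, not part of the
patch.  It is a minimal userspace sketch of the flow the two sections above
describe: configure the shared TLB via KVM_ENABLE_CAP(KVM_CAP_SW_TLB), then
report modified entries with KVM_DIRTY_TLB before the next KVM_RUN.  The
vcpu file descriptor, the geometry, and the helper names are assumptions;
error handling is elided, and the 64-bit bitmap store assumes a
little-endian host.]

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

#define TLB0_SIZE   512
#define TLB1_SIZE   64
#define NUM_ENTRIES (TLB0_SIZE + TLB1_SIZE)

static struct kvm_book3e_206_tlb_entry tlb_array[NUM_ENTRIES];
static __u64 dirty_bitmap[(NUM_ENTRIES + 63) / 64];

int config_sw_tlb(int vcpu_fd)
{
	struct kvm_book3e_206_tlb_params params;
	struct kvm_config_tlb cfg;
	struct kvm_enable_cap cap;

	memset(&params, 0, sizeof(params));
	params.tlb_sizes[0] = TLB0_SIZE;	/* TLB0: 4-way set associative */
	params.tlb_ways[0] = 4;
	params.tlb_sizes[1] = TLB1_SIZE;	/* TLB1: fully associative */
	params.tlb_ways[1] = TLB1_SIZE;

	memset(&cfg, 0, sizeof(cfg));
	cfg.params = (__u64)(unsigned long)&params;
	cfg.array = (__u64)(unsigned long)tlb_array;
	cfg.array_len = sizeof(tlb_array);
	cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_SW_TLB;
	cap.args[0] = (__u64)(unsigned long)&cfg;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}

/* After changing entry "idx" in tlb_array, before the next KVM_RUN: */
int flush_one_entry(int vcpu_fd, int idx)
{
	struct kvm_dirty_tlb dirty;

	memset(dirty_bitmap, 0, sizeof(dirty_bitmap));
	dirty_bitmap[idx / 64] |= 1ULL << (idx % 64);

	dirty.bitmap = (__u64)(unsigned long)dirty_bitmap;
	dirty.num_dirty = 1;
	return ioctl(vcpu_fd, KVM_DIRTY_TLB, &dirty);
}
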
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 7d9d4de057e..663c57f8716 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -296,4 +296,39 @@ struct kvm_allocate_rma {
 	__u64 rma_size;
 };
 
+struct kvm_book3e_206_tlb_entry {
+	__u32 mas8;
+	__u32 mas1;
+	__u64 mas2;
+	__u64 mas7_3;
+};
+
+struct kvm_book3e_206_tlb_params {
+	/*
+	 * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+	 *
+	 * - The number of ways of TLB0 must be a power of two between 2 and
+	 *   16.
+	 * - TLB1 must be fully associative.
+	 * - The size of TLB0 must be a multiple of the number of ways, and
+	 *   the number of sets must be a power of two.
+	 * - The size of TLB1 may not exceed 64 entries.
+	 * - TLB0 supports 4 KiB pages.
+	 * - The page sizes supported by TLB1 are as indicated by
+	 *   TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
+	 *   as returned by KVM_GET_SREGS.
+	 * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
+	 *   and tlb_ways[] must be zero.
+	 *
+	 * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
+	 *
+	 * KVM will adjust TLBnCFG based on the sizes configured here,
+	 * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
+	 * set to zero.
+	 */
+	__u32 tlb_sizes[4];
+	__u32 tlb_ways[4];
+	__u32 reserved[8];
+};
+
 #endif /* __LINUX_KVM_POWERPC_H */
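
[Editor's note: an annotation, not part of the patch.  Per the layout rules
documented in api.txt above, TLB0 entries in the shared array are grouped
by set and then by way, with the set hashed from the effective page number:
(MAS2 >> 12) & (num_sets - 1).  A hypothetical sketch of that indexing,
assuming the structs above are visible via <linux/kvm.h> on a powerpc
build:]

#include <linux/kvm.h>

static inline struct kvm_book3e_206_tlb_entry *
tlb0_entry(struct kvm_book3e_206_tlb_entry *array,
	   const struct kvm_book3e_206_tlb_params *p,
	   __u64 mas2, unsigned int way)
{
	unsigned int num_sets = p->tlb_sizes[0] / p->tlb_ways[0];
	unsigned int set = (mas2 >> 12) & (num_sets - 1);

	/* TLB0 occupies the start of the array; TLB1 follows at
	 * array[p->tlb_sizes[0]]. */
	return &array[set * p->tlb_ways[0] + way];
}
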
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index a5197d816ec..bc17441535f 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -22,13 +22,6 @@
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-struct tlbe{
-	u32 mas1;
-	u32 mas2;
-	u32 mas3;
-	u32 mas7;
-};
-
 #define E500_TLB_VALID 1
 #define E500_TLB_DIRTY 2
 
@@ -48,13 +41,17 @@ struct kvmppc_e500_tlb_params {
 };
 
 struct kvmppc_vcpu_e500 {
-	/* Unmodified copy of the guest's TLB. */
-	struct tlbe *gtlb_arch[E500_TLB_NUM];
+	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
+	struct kvm_book3e_206_tlb_entry *gtlb_arch;
+
+	/* Starting entry number in gtlb_arch[] */
+	int gtlb_offset[E500_TLB_NUM];
 
 	/* KVM internal information associated with each guest TLB entry */
 	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
 
-	unsigned int gtlb_size[E500_TLB_NUM];
+	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
+
 	unsigned int gtlb_nv[E500_TLB_NUM];
 
 	/*
@@ -68,7 +65,6 @@ struct kvmppc_vcpu_e500 {
 	 * and back, and our host TLB entries got evicted).
 	 */
 	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
-
 	unsigned int host_tlb1_nv;
 
 	u32 host_pid[E500_PID_NUM];
@@ -78,11 +74,10 @@ struct kvmppc_vcpu_e500 {
 	u32 mas0;
 	u32 mas1;
 	u32 mas2;
-	u32 mas3;
+	u64 mas7_3;
 	u32 mas4;
 	u32 mas5;
 	u32 mas6;
-	u32 mas7;
 
 	/* vcpu id table */
 	struct vcpu_id_table *idt;
@@ -95,6 +90,9 @@ struct kvmppc_vcpu_e500 {
 	u32 tlb1cfg;
 	u64 mcar;
 
+	struct page **shared_tlb_pages;
+	int num_shared_tlb_pages;
+
 	struct kvm_vcpu vcpu;
 };
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 46efd1a265c..a284f209e2d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -193,4 +193,9 @@ static inline void kvm_rma_init(void)
 {}
 #endif
 
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+			      struct kvm_config_tlb *cfg);
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+			     struct kvm_dirty_tlb *cfg);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 8c0d45a6faf..f17d7e732a1 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -121,7 +121,7 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	sregs->u.e.mas0 = vcpu_e500->mas0;
 	sregs->u.e.mas1 = vcpu_e500->mas1;
 	sregs->u.e.mas2 = vcpu_e500->mas2;
-	sregs->u.e.mas7_3 = ((u64)vcpu_e500->mas7 << 32) | vcpu_e500->mas3;
+	sregs->u.e.mas7_3 = vcpu_e500->mas7_3;
 	sregs->u.e.mas4 = vcpu_e500->mas4;
 	sregs->u.e.mas6 = vcpu_e500->mas6;
 
@@ -154,8 +154,7 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		vcpu_e500->mas0 = sregs->u.e.mas0;
 		vcpu_e500->mas1 = sregs->u.e.mas1;
 		vcpu_e500->mas2 = sregs->u.e.mas2;
-		vcpu_e500->mas7 = sregs->u.e.mas7_3 >> 32;
-		vcpu_e500->mas3 = (u32)sregs->u.e.mas7_3;
+		vcpu_e500->mas7_3 = sregs->u.e.mas7_3;
 		vcpu_e500->mas4 = sregs->u.e.mas4;
 		vcpu_e500->mas6 = sregs->u.e.mas6;
 	}
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index d48ae396f41..e0d36099c75 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -95,13 +95,17 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_MAS2:
 		vcpu_e500->mas2 = spr_val; break;
 	case SPRN_MAS3:
-		vcpu_e500->mas3 = spr_val; break;
+		vcpu_e500->mas7_3 &= ~(u64)0xffffffff;
+		vcpu_e500->mas7_3 |= spr_val;
+		break;
 	case SPRN_MAS4:
 		vcpu_e500->mas4 = spr_val; break;
 	case SPRN_MAS6:
 		vcpu_e500->mas6 = spr_val; break;
 	case SPRN_MAS7:
-		vcpu_e500->mas7 = spr_val; break;
+		vcpu_e500->mas7_3 &= (u64)0xffffffff;
+		vcpu_e500->mas7_3 |= (u64)spr_val << 32;
+		break;
 	case SPRN_L1CSR0:
 		vcpu_e500->l1csr0 = spr_val;
 		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
@@ -158,13 +162,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_MAS2:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
 	case SPRN_MAS3:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
+		kvmppc_set_gpr(vcpu, rt, (u32)vcpu_e500->mas7_3); break;
 	case SPRN_MAS4:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
 	case SPRN_MAS6:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
 	case SPRN_MAS7:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7_3 >> 32); break;
 
 	case SPRN_TLB0CFG:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
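
[Editor's note: an annotation, not part of the patch.  The mtspr/mfspr
emulation above folds the two 32-bit MAS3 and MAS7 registers into the
single 64-bit mas7_3 field: MAS3 occupies the low word, MAS7 (the upper
physical-address bits) the high word.  A standalone sketch of the
convention, using <stdint.h> types in place of the kernel's u32/u64:]

#include <stdint.h>

static inline uint32_t mas3_of(uint64_t mas7_3)
{
	return (uint32_t)mas7_3;		/* low 32 bits: MAS3 */
}

static inline uint32_t mas7_of(uint64_t mas7_3)
{
	return (uint32_t)(mas7_3 >> 32);	/* high 32 bits: MAS7 */
}

static inline uint64_t pack_mas7_3(uint32_t mas7, uint32_t mas3)
{
	return ((uint64_t)mas7 << 32) | mas3;
}
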
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 59221bb1e00..f19ae2f6152 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -19,6 +19,11 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_e500.h>
 
@@ -66,6 +71,13 @@ static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
 
 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
 
+static struct kvm_book3e_206_tlb_entry *get_entry(
+	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
+{
+	int offset = vcpu_e500->gtlb_offset[tlbsel];
+	return &vcpu_e500->gtlb_arch[offset + entry];
+}
+
 /*
  * Allocate a free shadow id and setup a valid sid mapping in given entry.
  * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
@@ -217,34 +229,13 @@ void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
 	preempt_enable();
 }
 
-void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct tlbe *tlbe;
-	int i, tlbsel;
-
-	printk("| %8s | %8s | %8s | %8s | %8s |\n",
-			"nr", "mas1", "mas2", "mas3", "mas7");
-
-	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
-		printk("Guest TLB%d:\n", tlbsel);
-		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
-			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
-			if (tlbe->mas1 & MAS1_VALID)
-				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
-					tlbsel, i, tlbe->mas1, tlbe->mas2,
-					tlbe->mas3, tlbe->mas7);
-		}
-	}
-}
-
 static inline unsigned int gtlb0_get_next_victim(
 		struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	unsigned int victim;
 
 	victim = vcpu_e500->gtlb_nv[0]++;
-	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
+	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
 		vcpu_e500->gtlb_nv[0] = 0;
 
 	return victim;
@@ -256,9 +247,9 @@ static inline unsigned int tlb1_max_shadow_size(void)
 	return host_tlb_params[1].entries - tlbcam_index - 1;
 }
 
-static inline int tlbe_is_writable(struct tlbe *tlbe)
+static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
 {
-	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
+	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
 }
 
 static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
@@ -289,39 +280,41 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
 /*
  * writing shadow tlb entry to host TLB
  */
-static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
+static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
+				     uint32_t mas0)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 	mtspr(SPRN_MAS0, mas0);
 	mtspr(SPRN_MAS1, stlbe->mas1);
-	mtspr(SPRN_MAS2, stlbe->mas2);
-	mtspr(SPRN_MAS3, stlbe->mas3);
-	mtspr(SPRN_MAS7, stlbe->mas7);
+	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
+	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
+	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 	asm volatile("isync; tlbwe" : : : "memory");
 	local_irq_restore(flags);
 }
 
 /* esel is index into set, not whole array */
 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-		int tlbsel, int esel, struct tlbe *stlbe)
+		int tlbsel, int esel, struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	if (tlbsel == 0) {
-		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(esel));
+		int way = esel & (vcpu_e500->gtlb_params[0].ways - 1);
+		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(way));
 	} else {
 		__write_host_tlbe(stlbe,
 				  MAS0_TLBSEL(1) |
 				  MAS0_ESEL(to_htlb1_esel(esel)));
 	}
 	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
-			     stlbe->mas3, stlbe->mas7);
+			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
 }
 
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct tlbe magic;
+	struct kvm_book3e_206_tlb_entry magic;
 	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
 	unsigned int stid;
 	pfn_t pfn;
@@ -335,9 +328,8 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
 		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
-	magic.mas3 = (pfn << PAGE_SHIFT) |
+	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
-	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
 
 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
 	preempt_enable();
@@ -358,7 +350,8 @@ void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
 static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
 				int tlbsel, int esel)
 {
-	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+	struct kvm_book3e_206_tlb_entry *gtlbe =
+		get_entry(vcpu_e500, tlbsel, esel);
 	struct vcpu_id_table *idt = vcpu_e500->idt;
 	unsigned int pr, tid, ts, pid;
 	u32 val, eaddr;
@@ -424,9 +417,8 @@ static int tlb0_set_base(gva_t addr, int sets, int ways)
 
 static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
 {
-	int sets = KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;
-
-	return tlb0_set_base(addr, sets, KVM_E500_TLB0_WAY_NUM);
+	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
+			     vcpu_e500->gtlb_params[0].ways);
 }
 
 static int htlb0_set_base(gva_t addr)
@@ -440,10 +432,10 @@ static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
 	unsigned int esel = get_tlb_esel_bit(vcpu_e500);
 
 	if (tlbsel == 0) {
-		esel &= KVM_E500_TLB0_WAY_NUM_MASK;
+		esel &= vcpu_e500->gtlb_params[0].ways - 1;
 		esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
 	} else {
-		esel &= vcpu_e500->gtlb_size[tlbsel] - 1;
+		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
 	}
 
 	return esel;
@@ -453,19 +445,22 @@ static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
 static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gva_t eaddr, int tlbsel, unsigned int pid, int as)
 {
-	int size = vcpu_e500->gtlb_size[tlbsel];
-	unsigned int set_base;
+	int size = vcpu_e500->gtlb_params[tlbsel].entries;
+	unsigned int set_base, offset;
 	int i;
 
 	if (tlbsel == 0) {
 		set_base = gtlb0_set_base(vcpu_e500, eaddr);
-		size = KVM_E500_TLB0_WAY_NUM;
+		size = vcpu_e500->gtlb_params[0].ways;
 	} else {
 		set_base = 0;
 	}
 
+	offset = vcpu_e500->gtlb_offset[tlbsel];
+
 	for (i = 0; i < size; i++) {
-		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
+		struct kvm_book3e_206_tlb_entry *tlbe =
+			&vcpu_e500->gtlb_arch[offset + set_base + i];
 		unsigned int tid;
 
 		if (eaddr < get_tlb_eaddr(tlbe))
@@ -491,7 +486,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
 }
 
 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
-					 struct tlbe *gtlbe,
+					 struct kvm_book3e_206_tlb_entry *gtlbe,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
@@ -518,7 +513,7 @@ static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 	int tlbsel = 0;
 	int i;
 
-	for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
+	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
 		struct tlbe_ref *ref =
 			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
 		kvmppc_e500_ref_release(ref);
@@ -530,6 +525,8 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
 	int stlbsel = 1;
 	int i;
 
+	kvmppc_e500_id_table_reset_all(vcpu_e500);
+
 	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
 		struct tlbe_ref *ref =
 			&vcpu_e500->tlb_refs[stlbsel][i];
@@ -559,18 +556,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 		| MAS1_TSIZE(tsized);
 	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
 		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
-	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+	vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
 	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
 		| (get_cur_pid(vcpu) << 16)
 		| (as ? MAS6_SAS : 0);
-	vcpu_e500->mas7 = 0;
 }
 
 /* TID must be supplied by the caller */
-static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-					   struct tlbe *gtlbe, int tsize,
-					   struct tlbe_ref *ref,
-					   u64 gvaddr, struct tlbe *stlbe)
+static inline void kvmppc_e500_setup_stlbe(
+	struct kvmppc_vcpu_e500 *vcpu_e500,
+	struct kvm_book3e_206_tlb_entry *gtlbe,
+	int tsize, struct tlbe_ref *ref, u64 gvaddr,
+	struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	pfn_t pfn = ref->pfn;
 
@@ -581,16 +578,16 @@ static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 	stlbe->mas2 = (gvaddr & MAS2_EPN)
 		| e500_shadow_mas2_attrib(gtlbe->mas2,
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
-	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
-		| e500_shadow_mas3_attrib(gtlbe->mas3,
+	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
+		| e500_shadow_mas3_attrib(gtlbe->mas7_3,
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
-	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
 }
 
 /* sesel is an index into the entire array, not just the set */
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int sesel,
-	struct tlbe *stlbe, struct tlbe_ref *ref)
+	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+	int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe,
+	struct tlbe_ref *ref)
 {
 	struct kvm_memory_slot *slot;
 	unsigned long pfn, hva;
@@ -700,15 +697,16 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 /* XXX only map the one-one case, for now use TLB0 */
 static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int esel, struct tlbe *stlbe)
+				int esel,
+				struct kvm_book3e_206_tlb_entry *stlbe)
 {
-	struct tlbe *gtlbe;
+	struct kvm_book3e_206_tlb_entry *gtlbe;
 	struct tlbe_ref *ref;
 	int sesel = esel & (host_tlb_params[0].ways - 1);
 	int sesel_base;
 	gva_t ea;
 
-	gtlbe = &vcpu_e500->gtlb_arch[0][esel];
+	gtlbe = get_entry(vcpu_e500, 0, esel);
 	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
 
 	ea = get_tlb_eaddr(gtlbe);
@@ -725,7 +723,8 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
  * the shadow TLB. */
/* XXX for both one-one and one-to-many , for now use TLB1 */
 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
+		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+		struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	struct tlbe_ref *ref;
 	unsigned int victim;
@@ -754,7 +753,8 @@ static inline int kvmppc_e500_gtlbe_invalidate(
 				struct kvmppc_vcpu_e500 *vcpu_e500,
 				int tlbsel, int esel)
 {
-	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+	struct kvm_book3e_206_tlb_entry *gtlbe =
+		get_entry(vcpu_e500, tlbsel, esel);
 
 	if (unlikely(get_tlb_iprot(gtlbe)))
 		return -1;
@@ -769,10 +769,10 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
 	int esel;
 
 	if (value & MMUCSR0_TLB0FI)
-		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
+		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
 			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
 	if (value & MMUCSR0_TLB1FI)
-		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
+		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
 			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
 
 	/* Invalidate all vcpu id mappings */
@@ -797,7 +797,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
 
 	if (ia) {
 		/* invalidate all entries */
-		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
+		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
+		     esel++)
 			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
 	} else {
 		ea &= 0xfffff000;
@@ -817,18 +818,17 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int tlbsel, esel;
-	struct tlbe *gtlbe;
+	struct kvm_book3e_206_tlb_entry *gtlbe;
 
 	tlbsel = get_tlb_tlbsel(vcpu_e500);
 	esel = get_tlb_esel(vcpu_e500, tlbsel);
 
-	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 	vcpu_e500->mas0 &= ~MAS0_NV(~0);
 	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
 	vcpu_e500->mas1 = gtlbe->mas1;
 	vcpu_e500->mas2 = gtlbe->mas2;
-	vcpu_e500->mas3 = gtlbe->mas3;
-	vcpu_e500->mas7 = gtlbe->mas7;
+	vcpu_e500->mas7_3 = gtlbe->mas7_3;
 
 	return EMULATE_DONE;
 }
@@ -839,7 +839,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	int as = !!get_cur_sas(vcpu_e500);
 	unsigned int pid = get_cur_spid(vcpu_e500);
 	int esel, tlbsel;
-	struct tlbe *gtlbe = NULL;
+	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
 	gva_t ea;
 
 	ea = kvmppc_get_gpr(vcpu, rb);
@@ -847,7 +847,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
 		if (esel >= 0) {
-			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 			break;
 		}
 	}
@@ -857,8 +857,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
 		vcpu_e500->mas1 = gtlbe->mas1;
 		vcpu_e500->mas2 = gtlbe->mas2;
-		vcpu_e500->mas3 = gtlbe->mas3;
-		vcpu_e500->mas7 = gtlbe->mas7;
+		vcpu_e500->mas7_3 = gtlbe->mas7_3;
 	} else {
 		int victim;
 
@@ -873,8 +872,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
 		vcpu_e500->mas2 &= MAS2_EPN;
 		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
-		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
-		vcpu_e500->mas7 = 0;
+		vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
 	}
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
@@ -883,8 +881,8 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 
 /* sesel is index into the set, not the whole array */
 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-			struct tlbe *gtlbe,
-			struct tlbe *stlbe,
+			struct kvm_book3e_206_tlb_entry *gtlbe,
+			struct kvm_book3e_206_tlb_entry *stlbe,
 			int stlbsel, int sesel)
 {
 	int stid;
@@ -902,28 +900,27 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct tlbe *gtlbe;
+	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 
 	tlbsel = get_tlb_tlbsel(vcpu_e500);
 	esel = get_tlb_esel(vcpu_e500, tlbsel);
 
-	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
 	if (get_tlb_v(gtlbe))
 		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
 
 	gtlbe->mas1 = vcpu_e500->mas1;
 	gtlbe->mas2 = vcpu_e500->mas2;
-	gtlbe->mas3 = vcpu_e500->mas3;
-	gtlbe->mas7 = vcpu_e500->mas7;
+	gtlbe->mas7_3 = vcpu_e500->mas7_3;
 
 	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
-			     gtlbe->mas3, gtlbe->mas7);
+			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
-		struct tlbe stlbe;
+		struct kvm_book3e_206_tlb_entry stlbe;
 		int stlbsel, sesel;
 		u64 eaddr;
 		u64 raddr;
@@ -996,9 +993,11 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
 			gva_t eaddr)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct tlbe *gtlbe =
-		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
-	u64 pgmask = get_tlb_bytes(gtlbe) - 1;
+	struct kvm_book3e_206_tlb_entry *gtlbe;
+	u64 pgmask;
+
+	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
+	pgmask = get_tlb_bytes(gtlbe) - 1;
 
 	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
 }
@@ -1012,12 +1011,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	struct tlbe_priv *priv;
-	struct tlbe *gtlbe, stlbe;
+	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
 	int tlbsel = tlbsel_of(index);
 	int esel = esel_of(index);
 	int stlbsel, sesel;
 
-	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
 	switch (tlbsel) {
 	case 0:
@@ -1073,25 +1072,174 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
 
 void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	struct tlbe *tlbe;
+	struct kvm_book3e_206_tlb_entry *tlbe;
 
 	/* Insert large initial mapping for guest. */
-	tlbe = &vcpu_e500->gtlb_arch[1][0];
+	tlbe = get_entry(vcpu_e500, 1, 0);
 	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
 	tlbe->mas2 = 0;
-	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
-	tlbe->mas7 = 0;
+	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
 
 	/* 4K map for serial output. Used by kernel wrapper. */
-	tlbe = &vcpu_e500->gtlb_arch[1][1];
+	tlbe = get_entry(vcpu_e500, 1, 1);
 	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
-	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
-	tlbe->mas7 = 0;
+	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
+}
+
+static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	int i;
+
+	clear_tlb_refs(vcpu_e500);
+	kfree(vcpu_e500->gtlb_priv[0]);
+	kfree(vcpu_e500->gtlb_priv[1]);
+
+	if (vcpu_e500->shared_tlb_pages) {
+		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
+					  PAGE_SIZE)));
+
+		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
+			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
+			put_page(vcpu_e500->shared_tlb_pages[i]);
+		}
+
+		vcpu_e500->num_shared_tlb_pages = 0;
+		vcpu_e500->shared_tlb_pages = NULL;
+	} else {
+		kfree(vcpu_e500->gtlb_arch);
+	}
+
+	vcpu_e500->gtlb_arch = NULL;
+}
+
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+			      struct kvm_config_tlb *cfg)
+{
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+	struct kvm_book3e_206_tlb_params params;
+	char *virt;
+	struct page **pages;
+	struct tlbe_priv *privs[2] = {};
+	size_t array_len;
+	u32 sets;
+	int num_pages, ret, i;
+
+	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
+		return -EINVAL;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
+			   sizeof(params)))
+		return -EFAULT;
+
+	if (params.tlb_sizes[1] > 64)
+		return -EINVAL;
+	if (params.tlb_ways[1] != params.tlb_sizes[1])
+		return -EINVAL;
+	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
+		return -EINVAL;
+	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
+		return -EINVAL;
+
+	if (!is_power_of_2(params.tlb_ways[0]))
+		return -EINVAL;
+
+	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
+	if (!is_power_of_2(sets))
+		return -EINVAL;
+
+	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
+	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);
+
+	if (cfg->array_len < array_len)
+		return -EINVAL;
+
+	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
+		    cfg->array / PAGE_SIZE;
+	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+	if (ret < 0)
+		goto err_pages;
+
+	if (ret != num_pages) {
+		num_pages = ret;
+		ret = -EFAULT;
+		goto err_put_page;
+	}
+
+	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
+	if (!virt)
+		goto err_put_page;
+
+	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
+			   GFP_KERNEL);
+	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
+			   GFP_KERNEL);
+
+	if (!privs[0] || !privs[1])
+		goto err_put_page;
+
+	free_gtlb(vcpu_e500);
+
+	vcpu_e500->gtlb_priv[0] = privs[0];
+	vcpu_e500->gtlb_priv[1] = privs[1];
+
+	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
+		(virt + (cfg->array & (PAGE_SIZE - 1)));
+
+	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
+	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
+
+	vcpu_e500->gtlb_offset[0] = 0;
+	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
+
+	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
+	if (params.tlb_sizes[0] <= 2048)
+		vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
+
+	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
+	vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
+
+	vcpu_e500->shared_tlb_pages = pages;
+	vcpu_e500->num_shared_tlb_pages = num_pages;
+
+	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
+	vcpu_e500->gtlb_params[0].sets = sets;
+
+	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
+	vcpu_e500->gtlb_params[1].sets = 1;
+
+	return 0;
+
+err_put_page:
+	kfree(privs[0]);
+	kfree(privs[1]);
+
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+
+err_pages:
+	kfree(pages);
+	return ret;
+}
+
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+			     struct kvm_dirty_tlb *dirty)
+{
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+	clear_tlb_refs(vcpu_e500);
+	return 0;
 }
 
 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
+	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
+	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
+
 	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
 	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
 
@@ -1124,17 +1272,22 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
-	vcpu_e500->gtlb_arch[0] =
-		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
-	if (vcpu_e500->gtlb_arch[0] == NULL)
-		goto err;
+	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
+	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
 
-	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
-	vcpu_e500->gtlb_arch[1] =
-		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
-	if (vcpu_e500->gtlb_arch[1] == NULL)
-		goto err;
+	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
+	vcpu_e500->gtlb_params[0].sets =
+		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;
+
+	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
+	vcpu_e500->gtlb_params[1].sets = 1;
+
+	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
+	if (!vcpu_e500->gtlb_arch)
+		return -ENOMEM;
+
+	vcpu_e500->gtlb_offset[0] = 0;
+	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
 
 	vcpu_e500->tlb_refs[0] =
 		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
@@ -1148,15 +1301,15 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	if (!vcpu_e500->tlb_refs[1])
 		goto err;
 
-	vcpu_e500->gtlb_priv[0] =
-		kzalloc(sizeof(struct tlbe_ref) * vcpu_e500->gtlb_size[0],
-			GFP_KERNEL);
+	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
+					  vcpu_e500->gtlb_params[0].entries,
+					  GFP_KERNEL);
 	if (!vcpu_e500->gtlb_priv[0])
 		goto err;
 
-	vcpu_e500->gtlb_priv[1] =
-		kzalloc(sizeof(struct tlbe_ref) * vcpu_e500->gtlb_size[1],
-			GFP_KERNEL);
+	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
+					  vcpu_e500->gtlb_params[1].entries,
+					  GFP_KERNEL);
 	if (!vcpu_e500->gtlb_priv[1])
 		goto err;
 
@@ -1165,32 +1318,24 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 	/* Init TLB configuration register */
 	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
-	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
+	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
 	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
-	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
+	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_params[1].entries;
 
 	return 0;
 
 err:
+	free_gtlb(vcpu_e500);
 	kfree(vcpu_e500->tlb_refs[0]);
 	kfree(vcpu_e500->tlb_refs[1]);
-	kfree(vcpu_e500->gtlb_priv[0]);
-	kfree(vcpu_e500->gtlb_priv[1]);
-	kfree(vcpu_e500->gtlb_arch[0]);
-	kfree(vcpu_e500->gtlb_arch[1]);
 	return -1;
 }
 
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	clear_tlb_refs(vcpu_e500);
-
+	free_gtlb(vcpu_e500);
 	kvmppc_e500_id_table_free(vcpu_e500);
 
 	kfree(vcpu_e500->tlb_refs[0]);
 	kfree(vcpu_e500->tlb_refs[1]);
-	kfree(vcpu_e500->gtlb_priv[0]);
-	kfree(vcpu_e500->gtlb_priv[1]);
-	kfree(vcpu_e500->gtlb_arch[1]);
-	kfree(vcpu_e500->gtlb_arch[0]);
 }
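
[Editor's note: an annotation, not part of the patch.  With this change the
two guest TLBs live in one flat buffer, and get_entry() above resolves a
(tlbsel, esel) pair through a per-TLB starting offset.  A hypothetical
standalone model of that layout; names and sizes are assumptions:]

#include <stdio.h>

#define TLB0_ENTRIES 512
#define TLB1_ENTRIES 64

struct entry { unsigned int mas1; };

static struct entry array[TLB0_ENTRIES + TLB1_ENTRIES];
static const int offset[2] = { 0, TLB0_ENTRIES };	/* like gtlb_offset[] */

static struct entry *get_entry_model(int tlbsel, int esel)
{
	return &array[offset[tlbsel] + esel];
}

int main(void)
{
	/* TLB1 entry 1 lands at flat index TLB0_ENTRIES + 1 = 513. */
	printf("%td\n", get_entry_model(1, 1) - array);
	return 0;
}
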
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index b587f691459..2c296407e75 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -20,13 +20,9 @@
 #include <asm/tlb.h>
 #include <asm/kvm_e500.h>
 
-#define KVM_E500_TLB0_WAY_SIZE_BIT	7	/* Fixed */
-#define KVM_E500_TLB0_WAY_SIZE		(1UL << KVM_E500_TLB0_WAY_SIZE_BIT)
-#define KVM_E500_TLB0_WAY_SIZE_MASK	(KVM_E500_TLB0_WAY_SIZE - 1)
-
-#define KVM_E500_TLB0_WAY_NUM_BIT	1	/* No greater than 7 */
-#define KVM_E500_TLB0_WAY_NUM		(1UL << KVM_E500_TLB0_WAY_NUM_BIT)
-#define KVM_E500_TLB0_WAY_NUM_MASK	(KVM_E500_TLB0_WAY_NUM - 1)
+/* This geometry is the legacy default -- can be overridden by userspace */
+#define KVM_E500_TLB0_WAY_SIZE		128
+#define KVM_E500_TLB0_WAY_NUM		2
 
 #define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
 #define KVM_E500_TLB1_SIZE  16
@@ -58,50 +54,54 @@ extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
 extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
 
 /* TLB helper functions */
-static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	return (tlbe->mas1 >> 7) & 0x1f;
 }
 
-static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	return tlbe->mas2 & 0xfffff000;
 }
 
-static inline u64 get_tlb_bytes(const struct tlbe *tlbe)
+static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	unsigned int pgsize = get_tlb_size(tlbe);
 	return 1ULL << 10 << pgsize;
 }
 
-static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	u64 bytes = get_tlb_bytes(tlbe);
 	return get_tlb_eaddr(tlbe) + bytes - 1;
 }
 
-static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
-	u64 rpn = tlbe->mas7;
-	return (rpn << 32) | (tlbe->mas3 & 0xfffff000);
+	return tlbe->mas7_3 & ~0xfffULL;
}
 
-static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	return (tlbe->mas1 >> 16) & 0xff;
 }
 
-static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	return (tlbe->mas1 >> 12) & 0x1;
 }
 
-static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	return (tlbe->mas1 >> 31) & 0x1;
 }
 
-static inline unsigned int get_tlb_iprot(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	return (tlbe->mas1 >> 30) & 0x1;
 }
@@ -156,7 +156,7 @@ static inline unsigned int get_tlb_esel_bit(
 }
 
 static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
-			const struct tlbe *tlbe)
+			const struct kvm_book3e_206_tlb_entry *tlbe)
 {
 	gpa_t gpa;
 
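
[Editor's note: an annotation, not part of the patch.  get_tlb_bytes()
above decodes the 5-bit MAS1 TSIZE field as a power-of-two multiple of
1 KiB (bytes = 1 KiB << TSIZE), so TSIZE = 2 is 4 KiB and TSIZE = 18 is
256 MiB, matching the BOOK3E_PAGESZ_4K/256M uses in e500_tlb.c.  A small
self-checking sketch:]

#include <assert.h>
#include <stdint.h>

static uint64_t tsize_to_bytes(unsigned int tsize)
{
	return 1ULL << 10 << tsize;	/* 1 KiB << TSIZE */
}

int main(void)
{
	assert(tsize_to_bytes(2) == 4096);		/* 4 KiB */
	assert(tsize_to_bytes(18) == 256ULL << 20);	/* 256 MiB */
	return 0;
}
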
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a5671616af8..3cf6fba513a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -222,6 +222,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
+#ifdef CONFIG_KVM_E500
+	case KVM_CAP_SW_TLB:
+#endif
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -602,6 +605,19 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		r = 0;
 		vcpu->arch.papr_enabled = true;
 		break;
+#ifdef CONFIG_KVM_E500
+	case KVM_CAP_SW_TLB: {
+		struct kvm_config_tlb cfg;
+		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
+
+		r = -EFAULT;
+		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
+			break;
+
+		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
+		break;
+	}
+#endif
 	default:
 		r = -EINVAL;
 		break;
@@ -651,6 +667,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
 		break;
 	}
+
+#ifdef CONFIG_KVM_E500
+	case KVM_DIRTY_TLB: {
+		struct kvm_dirty_tlb dirty;
+		r = -EFAULT;
+		if (copy_from_user(&dirty, argp, sizeof(dirty)))
+			goto out;
+		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
+		break;
+	}
+#endif
+
 	default:
 		r = -EINVAL;
 	}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 245bcb3a0fc..fa029ced4bd 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -581,6 +581,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_PPC_RMA	65
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
 #define KVM_CAP_PPC_PAPR 68
+#define KVM_CAP_SW_TLB 69
 #define KVM_CAP_S390_GMAP 71
 #define KVM_CAP_TSC_DEADLINE_TIMER 72
 #define KVM_CAP_S390_UCONTROL 73
@@ -664,6 +665,21 @@ struct kvm_clock_data {
 	__u32 pad[9];
 };
 
+#define KVM_MMU_FSL_BOOKE_NOHV		0
+#define KVM_MMU_FSL_BOOKE_HV		1
+
+struct kvm_config_tlb {
+	__u64 params;
+	__u64 array;
+	__u32 mmu_type;
+	__u32 array_len;
+};
+
+struct kvm_dirty_tlb {
+	__u64 bitmap;
+	__u32 num_dirty;
+};
+
 /*
  * ioctls for VM fds
  */
@@ -801,6 +817,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_CREATE_SPAPR_TCE	  _IOW(KVMIO,  0xa8, struct kvm_create_spapr_tce)
 /* Available with KVM_CAP_RMA */
 #define KVM_ALLOCATE_RMA	  _IOR(KVMIO,  0xa9, struct kvm_allocate_rma)
+/* Available with KVM_CAP_SW_TLB */
+#define KVM_DIRTY_TLB		  _IOW(KVMIO,  0xaa, struct kvm_dirty_tlb)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 
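
[Editor's note: an annotation, not part of the patch.  The KVM_DIRTY_TLB
bitmap holds one bit per shared-array entry, rounded up to a multiple of
64 bits as documented in api.txt above.  A hypothetical helper for sizing
it, assuming a powerpc build where <linux/kvm.h> exposes the book3e
params struct:]

#include <linux/kvm.h>

static inline __u32
dirty_bitmap_bytes(const struct kvm_book3e_206_tlb_params *p)
{
	__u32 entries = p->tlb_sizes[0] + p->tlb_sizes[1];

	/* Round up to whole __u64 words, 8 bytes each. */
	return ((entries + 63) / 64) * 8;
}
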