author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2012-09-09 22:52:50 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-09-17 02:31:49 -0400
commit		5524a27d39b68770f203d8d42eb5a95dde4933bc (patch)
tree		a238d67d29bce6fd893cd4c5545b5daf058ba5fd /arch/powerpc/include
parent		dcda287a9b26309ae43a091d0ecde16f8f61b4c0 (diff)
powerpc/mm: Convert virtual address to vpn
This patch converts various functions to take a virtual page number
instead of a virtual address. The virtual page number is the virtual
address shifted right by VPN_SHIFT (12) bits. This enables us to
support an address range of up to 76 bits.
Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
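
[Editor's note: for illustration only, not part of the commit. The relationship the
message describes is a single shift; VPN_SHIFT comes from the patch below, while the
helper names are hypothetical.]

#include <assert.h>

#define VPN_SHIFT	12	/* from the patch below */

/* Hypothetical helpers illustrating the va <-> vpn conversion */
static inline unsigned long va_to_vpn(unsigned long va)
{
	return va >> VPN_SHIFT;
}

static inline unsigned long vpn_to_va(unsigned long vpn)
{
	/* the low 12 bits (the in-page byte offset) are not recoverable */
	return vpn << VPN_SHIFT;
}

int main(void)
{
	unsigned long va = 0x123456789000UL;	/* page-aligned test value */

	assert(vpn_to_va(va_to_vpn(va)) == va);
	return 0;
}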
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h     |  2
-rw-r--r--  arch/powerpc/include/asm/machdep.h        |  6
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h     | 78
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 18
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h       |  4
5 files changed, 77 insertions, 31 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..7aefdb3e1ce4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -59,7 +59,7 @@ struct hpte_cache {
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
 	struct rcu_head rcu_head;
-	u64 host_va;
+	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8111e1b78f7f..c4231973edd3 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,19 +34,19 @@ struct machdep_calls {
 	char		*name;
 #ifdef CONFIG_PPC64
 	void            (*hpte_invalidate)(unsigned long slot,
-					   unsigned long va,
+					   unsigned long vpn,
 					   int psize, int ssize,
 					   int local);
 	long		(*hpte_updatepp)(unsigned long slot,
 					 unsigned long newpp,
-					 unsigned long va,
+					 unsigned long vpn,
 					 int psize, int ssize,
 					 int local);
 	void            (*hpte_updateboltedpp)(unsigned long newpp,
 					       unsigned long ea,
 					       int psize, int ssize);
 	long		(*hpte_insert)(unsigned long hpte_group,
-				       unsigned long va,
+				       unsigned long vpn,
 				       unsigned long prpn,
 				       unsigned long rflags,
 				       unsigned long vflags,
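
[Editor's note: a hedged sketch, not in this hunk, of the knock-on effect for callers.
Code that used to pass a raw va to these hooks now derives a vpn first, via the
hpt_vpn() helper introduced below; example_invalidate() is a hypothetical name.]

/* Hypothetical caller, assuming kernel context (ppc_md and hpt_vpn()
 * as in this patch); shows the new currency of the hpte hooks. */
static void example_invalidate(unsigned long slot, unsigned long vsid,
			       unsigned long ea, int psize, int ssize,
			       int local)
{
	unsigned long vpn = hpt_vpn(ea, vsid, ssize);	/* was: hpt_va() */

	ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
}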
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 1c65a59881ea..6aeb4986a373 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -154,9 +154,25 @@ struct mmu_psize_def
 #define MMU_SEGSIZE_256M	0
 #define MMU_SEGSIZE_1T		1
 
+/*
+ * encode page number shift.
+ * in order to fit the 78 bit va in a 64 bit variable we shift the va by
+ * 12 bits. This enable us to address upto 76 bit va.
+ * For hpt hash from a va we can ignore the page size bits of va and for
+ * hpte encoding we ignore up to 23 bits of va. So ignoring lower 12 bits ensure
+ * we work in all cases including 4k page size.
+ */
+#define VPN_SHIFT	12
 
 #ifndef __ASSEMBLY__
 
+static inline int segment_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SID_SHIFT;
+	return SID_SHIFT_1T;
+}
+
 /*
  * The current system page and segment sizes
  */
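
[Editor's note: a small userspace check, assuming SID_SHIFT is 28 and SID_SHIFT_1T
is 40 as defined elsewhere in mmu-hash64.h, of how many vpn bits a segment offset
occupies once segment_shift() is combined with VPN_SHIFT.]

#include <stdio.h>

#define VPN_SHIFT	12
#define SID_SHIFT	28	/* 256M segments */
#define SID_SHIFT_1T	40	/* 1T segments */

int main(void)
{
	/* per-segment vpn offset width is segment_shift(ssize) - VPN_SHIFT */
	printf("256M segment: %d offset bits in the vpn\n",
	       SID_SHIFT - VPN_SHIFT);		/* prints 16 */
	printf("1T segment:   %d offset bits in the vpn\n",
	       SID_SHIFT_1T - VPN_SHIFT);	/* prints 28 */
	return 0;
}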
@@ -180,18 +196,39 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
 extern int mmu_ci_restrictions;
 
 /*
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE.  The bottom 7 bits
+ * of the returned value are zero.
+ */
+static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
+					     int ssize)
+{
+	unsigned long v;
+	/*
+	 * The AVA field omits the low-order 23 bits of the 78 bits VA.
+	 * These bits are not needed in the PTE, because the
+	 * low-order b of these bits are part of the byte offset
+	 * into the virtual page and, if b < 23, the high-order
+	 * 23-b of these bits are always used in selecting the
+	 * PTEGs to be searched
+	 */
+	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	return v;
+}
+
+/*
  * This function sets the AVPN and L fields of the HPTE appropriately
  * for the page size
  */
-static inline unsigned long hpte_encode_v(unsigned long va, int psize,
-					  int ssize)
+static inline unsigned long hpte_encode_v(unsigned long vpn,
+					  int psize, int ssize)
 {
 	unsigned long v;
-	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
-	v <<= HPTE_V_AVPN_SHIFT;
+	v = hpte_encode_avpn(vpn, psize, ssize);
 	if (psize != MMU_PAGE_4K)
 		v |= HPTE_V_LARGE;
-	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
 	return v;
 }
 
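[Editor's note: before the next hunk, a quick sanity check (userspace sketch, values
arbitrary) that the refactored AVPN arithmetic is the old computation in disguise:
shifting the vpn by 23 - VPN_SHIFT equals shifting the original va by 23.]

#include <assert.h>

#define VPN_SHIFT	12

int main(void)
{
	unsigned long va  = 0x3456789abcdeUL;	/* arbitrary test value */
	unsigned long vpn = va >> VPN_SHIFT;

	/* old: v = (va >> 23); new: v = (vpn >> (23 - VPN_SHIFT)) */
	assert((va >> 23) == (vpn >> (23 - VPN_SHIFT)));
	return 0;
}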
@@ -216,30 +253,37 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
 }
 
 /*
- * Build a VA given VSID, EA and segment size
+ * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
  */
-static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
-				   int ssize)
+static inline unsigned long hpt_vpn(unsigned long ea,
+				    unsigned long vsid, int ssize)
 {
-	if (ssize == MMU_SEGSIZE_256M)
-		return (vsid << 28) | (ea & 0xfffffffUL);
-	return (vsid << 40) | (ea & 0xffffffffffUL);
+	unsigned long mask;
+	int s_shift = segment_shift(ssize);
+
+	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
+	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
 }
 
 /*
  * This hashes a virtual address
  */
-
-static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
-				     int ssize)
+static inline unsigned long hpt_hash(unsigned long vpn,
+				     unsigned int shift, int ssize)
 {
+	int mask;
 	unsigned long hash, vsid;
 
+	/* VPN_SHIFT can be atmost 12 */
 	if (ssize == MMU_SEGSIZE_256M) {
-		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
+		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
+		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
+			((vpn & mask) >> (shift - VPN_SHIFT));
 	} else {
-		vsid = va >> 40;
-		hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
+		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
+		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
+		hash = vsid ^ (vsid << 25) ^
+			((vpn & mask) >> (shift - VPN_SHIFT)) ;
 	}
 	return hash & 0x7fffffffffUL;
 }
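
[Editor's note: likewise, a userspace sketch (assuming SID_SHIFT = 28) showing that
for a 256M segment the new hpt_vpn() result is exactly the old hpt_va() result
shifted right by VPN_SHIFT.]

#include <assert.h>

#define VPN_SHIFT	12
#define SID_SHIFT	28	/* 256M segments */

int main(void)
{
	unsigned long vsid = 0x12345UL, ea = 0xabcdef0UL;
	unsigned long mask = (1UL << (SID_SHIFT - VPN_SHIFT)) - 1;

	/* old encoding: full va; new encoding: vpn, per hpt_vpn() above */
	unsigned long old_va  = (vsid << 28) | (ea & 0xfffffffUL);
	unsigned long new_vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
				((ea >> VPN_SHIFT) & mask);

	assert(new_vpn == old_va >> VPN_SHIFT);
	return 0;
}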
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 59247e816ac5..eedf427c9124 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -58,14 +58,16 @@
 /* Trick: we set __end to va + 64k, which happens works for
  * a 16M page as well as we want only one iteration
  */
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
 	do {								\
-		unsigned long __end = va + PAGE_SIZE;			\
+		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
 		unsigned __split = (psize == MMU_PAGE_4K ||		\
 				    psize == MMU_PAGE_64K_AP);		\
 		shift = mmu_psize_defs[psize].shift;			\
-		for (index = 0; va < __end; index++, va += (1L << shift)) { \
-			if (!__split || __rpte_sub_valid(rpte, index)) do { \
+		for (index = 0; vpn < __end; index++,			\
+			     vpn += (1L << (shift - VPN_SHIFT))) {	\
+			if (!__split || __rpte_sub_valid(rpte, index))	\
+				do {
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
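[Editor's note: a hedged usage sketch of the macro pair, modeled on flush_hash_page(),
which this patch also converts; the loop body shown is illustrative.]

/* Kernel-context sketch: walk every hashed subpage of one PTE.
 * vpn advances by one (sub)page, i.e. by 1 << (shift - VPN_SHIFT). */
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
	unsigned long hash = hpt_hash(vpn, shift, ssize);

	/* ... locate and invalidate the HPTE for this subpage ... */
} pte_iterate_hashed_end();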
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 81143fcbd113..fc02d1dee955 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -95,7 +95,7 @@ struct ppc64_tlb_batch {
 	unsigned long		index;
 	struct mm_struct	*mm;
 	real_pte_t		pte[PPC64_TLB_BATCH_NR];
-	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
+	unsigned long		vpn[PPC64_TLB_BATCH_NR];
 	unsigned int		psize;
 	int			ssize;
 };
@@ -127,7 +127,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 #define arch_flush_lazy_mmu_mode()	do {} while (0)
 
 
-extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
+extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
 			    int ssize, int local);
 extern void flush_hash_range(unsigned long number, int local);
 
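[Editor's note: finally, a hedged sketch of how the renamed batch field lines up with
the new flush_hash_page() signature; kernel context assumed, and example_drain() is a
hypothetical name for the kind of per-entry fallback flush_hash_range() can perform.]

/* Sketch: drain a ppc64_tlb_batch one entry at a time, now keyed by vpn */
static void example_drain(struct ppc64_tlb_batch *batch, int local)
{
	unsigned long i;

	for (i = 0; i < batch->index; i++)
		flush_hash_page(batch->vpn[i], batch->pte[i],
				batch->psize, batch->ssize, local);
}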