Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  532
1 file changed, 415 insertions(+), 117 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6e9e05cce02c..b2f3dbca6952 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -19,6 +19,7 @@
  */
 
 #undef DEBUG
+#undef DEBUG_LOW
 
 #include <linux/config.h>
 #include <linux/spinlock.h>
@@ -59,6 +60,15 @@
 #define DBG(fmt...)
 #endif
 
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) udbg_printf(fmt)
+#else
+#define DBG_LOW(fmt...)
+#endif
+
+#define KB (1024)
+#define MB (1024*KB)
+
 /*
  * Note: pte   --> Linux PTE
  *       HPTE  --> PowerPC Hashed Page Table Entry
@@ -77,91 +87,290 @@ extern unsigned long dart_tablebase;
 
 hpte_t *htab_address;
 unsigned long htab_hash_mask;
-
 unsigned long _SDR1;
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+int mmu_linear_psize = MMU_PAGE_4K;
+int mmu_virtual_psize = MMU_PAGE_4K;
+#ifdef CONFIG_HUGETLB_PAGE
+int mmu_huge_psize = MMU_PAGE_16M;
+unsigned int HPAGE_SHIFT;
+#endif
 
-#define KB (1024)
-#define MB (1024*KB)
-
-static inline void loop_forever(void)
-{
-        volatile unsigned long x = 1;
-        for(;x;x|=1)
-                ;
-}
-
-static inline void create_pte_mapping(unsigned long start, unsigned long end,
-                                      unsigned long mode, int large)
+/* There are definitions of page sizes arrays to be used when none
+ * is provided by the firmware.
+ */
+
+/* Pre-POWER4 CPUs (4k pages only)
+ */
+struct mmu_psize_def mmu_psize_defaults_old[] = {
+        [MMU_PAGE_4K] = {
+                .shift  = 12,
+                .sllp   = 0,
+                .penc   = 0,
+                .avpnm  = 0,
+                .tlbiel = 0,
+        },
+};
+
+/* POWER4, GPUL, POWER5
+ *
+ * Support for 16Mb large pages
+ */
+struct mmu_psize_def mmu_psize_defaults_gp[] = {
+        [MMU_PAGE_4K] = {
+                .shift  = 12,
+                .sllp   = 0,
+                .penc   = 0,
+                .avpnm  = 0,
+                .tlbiel = 1,
+        },
+        [MMU_PAGE_16M] = {
+                .shift  = 24,
+                .sllp   = SLB_VSID_L,
+                .penc   = 0,
+                .avpnm  = 0x1UL,
+                .tlbiel = 0,
+        },
+};
+
+
+int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+                      unsigned long pstart, unsigned long mode, int psize)
 {
-        unsigned long addr;
-        unsigned int step;
+        unsigned long vaddr, paddr;
+        unsigned int step, shift;
         unsigned long tmp_mode;
-        unsigned long vflags;
+        int ret = 0;
 
-        if (large) {
-                step = 16*MB;
-                vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
-        } else {
-                step = 4*KB;
-                vflags = HPTE_V_BOLTED;
-        }
+        shift = mmu_psize_defs[psize].shift;
+        step = 1 << shift;
 
-        for (addr = start; addr < end; addr += step) {
+        for (vaddr = vstart, paddr = pstart; vaddr < vend;
+             vaddr += step, paddr += step) {
                 unsigned long vpn, hash, hpteg;
-                unsigned long vsid = get_kernel_vsid(addr);
-                unsigned long va = (vsid << 28) | (addr & 0xfffffff);
-                int ret = -1;
-
-                if (large)
-                        vpn = va >> HPAGE_SHIFT;
-                else
-                        vpn = va >> PAGE_SHIFT;
-
+                unsigned long vsid = get_kernel_vsid(vaddr);
+                unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
 
+                vpn = va >> shift;
                 tmp_mode = mode;
 
                 /* Make non-kernel text non-executable */
-                if (!in_kernel_text(addr))
-                        tmp_mode = mode | HW_NO_EXEC;
-
-                hash = hpt_hash(vpn, large);
+                if (!in_kernel_text(vaddr))
+                        tmp_mode = mode | HPTE_R_N;
 
+                hash = hpt_hash(va, shift);
                 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+                /* The crap below can be cleaned once ppd_md.probe() can
+                 * set up the hash callbacks, thus we can just used the
+                 * normal insert callback here.
+                 */
 #ifdef CONFIG_PPC_ISERIES
-                if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
-                        ret = iSeries_hpte_bolt_or_insert(hpteg, va,
-                                virt_to_abs(addr) >> PAGE_SHIFT,
-                                vflags, tmp_mode);
+                if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
+                        ret = iSeries_hpte_insert(hpteg, va,
+                                                  virt_to_abs(paddr),
+                                                  tmp_mode,
+                                                  HPTE_V_BOLTED,
+                                                  psize);
                 else
 #endif
 #ifdef CONFIG_PPC_PSERIES
                 if (systemcfg->platform & PLATFORM_LPAR)
                         ret = pSeries_lpar_hpte_insert(hpteg, va,
-                                virt_to_abs(addr) >> PAGE_SHIFT,
-                                vflags, tmp_mode);
+                                                       virt_to_abs(paddr),
+                                                       tmp_mode,
+                                                       HPTE_V_BOLTED,
+                                                       psize);
                 else
 #endif
 #ifdef CONFIG_PPC_MULTIPLATFORM
                 ret = native_hpte_insert(hpteg, va,
-                                virt_to_abs(addr) >> PAGE_SHIFT,
-                                vflags, tmp_mode);
+                                         virt_to_abs(paddr),
+                                         tmp_mode, HPTE_V_BOLTED,
+                                         psize);
 #endif
+                if (ret < 0)
+                        break;
+        }
+        return ret < 0 ? ret : 0;
+}
 
-                if (ret == -1) {
-                        ppc64_terminate_msg(0x20, "create_pte_mapping");
-                        loop_forever();
+static int __init htab_dt_scan_page_sizes(unsigned long node,
+                                          const char *uname, int depth,
+                                          void *data)
+{
+        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+        u32 *prop;
+        unsigned long size = 0;
+
+        /* We are scanning "cpu" nodes only */
+        if (type == NULL || strcmp(type, "cpu") != 0)
+                return 0;
+
+        prop = (u32 *)of_get_flat_dt_prop(node,
+                                          "ibm,segment-page-sizes", &size);
+        if (prop != NULL) {
+                DBG("Page sizes from device-tree:\n");
+                size /= 4;
+                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
+                while(size > 0) {
+                        unsigned int shift = prop[0];
+                        unsigned int slbenc = prop[1];
+                        unsigned int lpnum = prop[2];
+                        unsigned int lpenc = 0;
+                        struct mmu_psize_def *def;
+                        int idx = -1;
+
+                        size -= 3; prop += 3;
+                        while(size > 0 && lpnum) {
+                                if (prop[0] == shift)
+                                        lpenc = prop[1];
+                                prop += 2; size -= 2;
+                                lpnum--;
+                        }
+                        switch(shift) {
+                        case 0xc:
+                                idx = MMU_PAGE_4K;
+                                break;
+                        case 0x10:
+                                idx = MMU_PAGE_64K;
+                                break;
+                        case 0x14:
+                                idx = MMU_PAGE_1M;
+                                break;
+                        case 0x18:
+                                idx = MMU_PAGE_16M;
+                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
+                                break;
+                        case 0x22:
+                                idx = MMU_PAGE_16G;
+                                break;
+                        }
+                        if (idx < 0)
+                                continue;
+                        def = &mmu_psize_defs[idx];
+                        def->shift = shift;
+                        if (shift <= 23)
+                                def->avpnm = 0;
+                        else
+                                def->avpnm = (1 << (shift - 23)) - 1;
+                        def->sllp = slbenc;
+                        def->penc = lpenc;
+                        /* We don't know for sure what's up with tlbiel, so
+                         * for now we only set it for 4K and 64K pages
+                         */
+                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
+                                def->tlbiel = 1;
+                        else
+                                def->tlbiel = 0;
+
+                        DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
+                            "tlbiel=%d, penc=%d\n",
+                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
+                            def->penc);
                 }
+                return 1;
+        }
+        return 0;
+}
+
+
+static void __init htab_init_page_sizes(void)
+{
+        int rc;
+
+        /* Default to 4K pages only */
+        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
+               sizeof(mmu_psize_defaults_old));
+
+        /*
+         * Try to find the available page sizes in the device-tree
+         */
+        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
+        if (rc != 0)  /* Found */
+                goto found;
+
+        /*
+         * Not in the device-tree, let's fallback on known size
+         * list for 16M capable GP & GR
+         */
+        if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
+            cpu_has_feature(CPU_FTR_16M_PAGE))
+                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
+                       sizeof(mmu_psize_defaults_gp));
+ found:
+        /*
+         * Pick a size for the linear mapping. Currently, we only support
+         * 16M, 1M and 4K which is the default
+         */
+        if (mmu_psize_defs[MMU_PAGE_16M].shift)
+                mmu_linear_psize = MMU_PAGE_16M;
+        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+                mmu_linear_psize = MMU_PAGE_1M;
+
+        /*
+         * Pick a size for the ordinary pages. Default is 4K, we support
+         * 64K if cache inhibited large pages are supported by the
+         * processor
+         */
+#ifdef CONFIG_PPC_64K_PAGES
+        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
+            cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+                mmu_virtual_psize = MMU_PAGE_64K;
+#endif
+
+        printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+               mmu_psize_defs[mmu_linear_psize].shift,
+               mmu_psize_defs[mmu_virtual_psize].shift);
+
+#ifdef CONFIG_HUGETLB_PAGE
+        /* Init large page size. Currently, we pick 16M or 1M depending
+         * on what is available
+         */
+        if (mmu_psize_defs[MMU_PAGE_16M].shift)
+                mmu_huge_psize = MMU_PAGE_16M;
+        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+                mmu_huge_psize = MMU_PAGE_1M;
+
+        /* Calculate HPAGE_SHIFT and sanity check it */
+        if (mmu_psize_defs[mmu_huge_psize].shift > 16 &&
+            mmu_psize_defs[mmu_huge_psize].shift < 28)
+                HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
+        else
+                HPAGE_SHIFT = 0; /* No huge pages dude ! */
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static int __init htab_dt_scan_pftsize(unsigned long node,
+                                       const char *uname, int depth,
+                                       void *data)
+{
+        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+        u32 *prop;
+
+        /* We are scanning "cpu" nodes only */
+        if (type == NULL || strcmp(type, "cpu") != 0)
+                return 0;
+
+        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+        if (prop != NULL) {
+                /* pft_size[0] is the NUMA CEC cookie */
+                ppc64_pft_size = prop[1];
+                return 1;
         }
+        return 0;
 }
 
-static unsigned long get_hashtable_size(void)
+static unsigned long __init htab_get_table_size(void)
 {
         unsigned long rnd_mem_size, pteg_count;
 
-        /* If hash size wasn't obtained in prom.c, we calculate it now based on
-         * the total RAM size
+        /* If hash size isn't already provided by the platform, we try to
+         * retreive it from the device-tree. If it's not there neither, we
+         * calculate it now based on the total RAM size
          */
+        if (ppc64_pft_size == 0)
+                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
         if (ppc64_pft_size)
                 return 1UL << ppc64_pft_size;
 
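The htab_dt_scan_page_sizes() hunk above walks the "ibm,segment-page-sizes" property as records of { shift, slbenc, lpnum } followed by lpnum pairs of { page-shift, penc }. The following standalone sketch (ordinary userspace C with a made-up sample property, not kernel code) decodes such an array the same way:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical property: 4K pages (shift 0xc) with one penc
         * pair, then 16M pages (shift 0x18) with one pair. */
        uint32_t prop[] = { 0xc,  0x0,   1, 0xc,  0x0,
                            0x18, 0x100, 1, 0x18, 0x0 };
        long size = sizeof(prop) / sizeof(prop[0]);
        uint32_t *p = prop;

        while (size > 0) {
                uint32_t shift = p[0], slbenc = p[1], lpnum = p[2];
                uint32_t lpenc = 0;

                size -= 3; p += 3;      /* consume the record header */
                while (size > 0 && lpnum) {
                        if (p[0] == shift)      /* encoding for the base size */
                                lpenc = p[1];
                        p += 2; size -= 2;
                        lpnum--;
                }
                printf("shift=0x%x sllp=0x%x penc=%u\n", shift, slbenc, lpenc);
        }
        return 0;
}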
@@ -181,17 +390,21 @@ void __init htab_initialize(void)
         unsigned long table, htab_size_bytes;
         unsigned long pteg_count;
         unsigned long mode_rw;
-        int i, use_largepages = 0;
         unsigned long base = 0, size = 0;
+        int i;
+
         extern unsigned long tce_alloc_start, tce_alloc_end;
 
         DBG(" -> htab_initialize()\n");
 
+        /* Initialize page sizes */
+        htab_init_page_sizes();
+
         /*
          * Calculate the required size of the htab. We want the number of
          * PTEGs to equal one half the number of real pages.
          */
-        htab_size_bytes = get_hashtable_size();
+        htab_size_bytes = htab_get_table_size();
         pteg_count = htab_size_bytes >> 7;
 
         /* For debug, make the HTAB 1/8 as big as it normally would be. */
@@ -211,14 +424,11 @@ void __init htab_initialize(void)
          * the absolute address space.
          */
         table = lmb_alloc(htab_size_bytes, htab_size_bytes);
+        BUG_ON(table == 0);
 
         DBG("Hash table allocated at %lx, size: %lx\n", table,
             htab_size_bytes);
 
-        if ( !table ) {
-                ppc64_terminate_msg(0x20, "hpt space");
-                loop_forever();
-        }
         htab_address = abs_to_virt(table);
 
         /* htab absolute addr + encoded htabsize */
@@ -234,8 +444,6 @@ void __init htab_initialize(void)
          * _NOT_ map it to avoid cache paradoxes as it's remapped non
          * cacheable later on
          */
-        if (cpu_has_feature(CPU_FTR_16M_PAGE))
-                use_largepages = 1;
 
         /* create bolted the linear mapping in the hash table */
         for (i=0; i < lmb.memory.cnt; i++) {
@@ -246,27 +454,32 @@ void __init htab_initialize(void)
 
 #ifdef CONFIG_U3_DART
                 /* Do not map the DART space. Fortunately, it will be aligned
-                 * in such a way that it will not cross two lmb regions and will
-                 * fit within a single 16Mb page.
-                 * The DART space is assumed to be a full 16Mb region even if we
-                 * only use 2Mb of that space. We will use more of it later for
-                 * AGP GART. We have to use a full 16Mb large page.
+                 * in such a way that it will not cross two lmb regions and
+                 * will fit within a single 16Mb page.
+                 * The DART space is assumed to be a full 16Mb region even if
+                 * we only use 2Mb of that space. We will use more of it later
+                 * for AGP GART. We have to use a full 16Mb large page.
                  */
                 DBG("DART base: %lx\n", dart_tablebase);
 
                 if (dart_tablebase != 0 && dart_tablebase >= base
                     && dart_tablebase < (base + size)) {
                         if (base != dart_tablebase)
-                                create_pte_mapping(base, dart_tablebase, mode_rw,
-                                                   use_largepages);
+                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
+                                                         base, mode_rw,
+                                                         mmu_linear_psize));
                         if ((base + size) > (dart_tablebase + 16*MB))
-                                create_pte_mapping(dart_tablebase + 16*MB, base + size,
-                                                   mode_rw, use_largepages);
+                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
+                                                         base + size,
+                                                         dart_tablebase+16*MB,
+                                                         mode_rw,
+                                                         mmu_linear_psize));
                         continue;
                 }
 #endif /* CONFIG_U3_DART */
-                create_pte_mapping(base, base + size, mode_rw, use_largepages);
-        }
+                BUG_ON(htab_bolt_mapping(base, base + size, base,
+                                         mode_rw, mmu_linear_psize));
+        }
 
@@ -282,8 +495,9 @@ void __init htab_initialize(void)
                 if (base + size >= tce_alloc_start)
                         tce_alloc_start = base + size + 1;
 
-                create_pte_mapping(tce_alloc_start, tce_alloc_end,
-                                   mode_rw, use_largepages);
+                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+                                         tce_alloc_start, mode_rw,
+                                         mmu_linear_psize));
         }
 
         DBG(" <- htab_initialize()\n");
@@ -298,9 +512,6 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 {
         struct page *page;
 
-        if (!pfn_valid(pte_pfn(pte)))
-                return pp;
-
         page = pte_page(pte);
 
         /* page is dirty */
@@ -309,7 +520,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
                         __flush_dcache_icache(page_address(page));
                         set_bit(PG_arch_1, &page->flags);
                 } else
-                        pp |= HW_NO_EXEC;
+                        pp |= HPTE_R_N;
         }
         return pp;
 }
@@ -325,94 +536,169 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         unsigned long vsid;
         struct mm_struct *mm;
         pte_t *ptep;
-        int ret;
-        int user_region = 0;
-        int local = 0;
         cpumask_t tmp;
+        int rc, user_region = 0, local = 0;
 
-        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
-                return 1;
+        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
+                ea, access, trap);
 
+        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
+                DBG_LOW(" out of pgtable range !\n");
+                return 1;
+        }
+
+        /* Get region & vsid */
         switch (REGION_ID(ea)) {
         case USER_REGION_ID:
                 user_region = 1;
                 mm = current->mm;
-                if (! mm)
+                if (! mm) {
+                        DBG_LOW(" user region with no mm !\n");
                         return 1;
-
+                }
                 vsid = get_vsid(mm->context.id, ea);
                 break;
         case VMALLOC_REGION_ID:
                 mm = &init_mm;
                 vsid = get_kernel_vsid(ea);
                 break;
-#if 0
-        case KERNEL_REGION_ID:
-                /*
-                 * Should never get here - entire 0xC0... region is bolted.
-                 * Send the problem up to do_page_fault
-                 */
-#endif
         default:
                 /* Not a valid range
                  * Send the problem up to do_page_fault
                  */
                 return 1;
-                break;
         }
+        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+        /* Get pgdir */
         pgdir = mm->pgd;
-
         if (pgdir == NULL)
                 return 1;
 
+        /* Check CPU locality */
         tmp = cpumask_of_cpu(smp_processor_id());
         if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                 local = 1;
 
-        /* Is this a huge page ? */
-        if (unlikely(in_hugepage_area(mm->context, ea)))
-                ret = hash_huge_page(mm, access, ea, vsid, local);
-        else {
-                ptep = find_linux_pte(pgdir, ea);
-                if (ptep == NULL)
-                        return 1;
-                ret = __hash_page(ea, access, vsid, ptep, trap, local);
+        /* Handle hugepage regions */
+        if (unlikely(in_hugepage_area(mm->context, ea))) {
+                DBG_LOW(" -> huge page !\n");
+                return hash_huge_page(mm, access, ea, vsid, local);
+        }
+
+        /* Get PTE and page size from page tables */
+        ptep = find_linux_pte(pgdir, ea);
+        if (ptep == NULL || !pte_present(*ptep)) {
+                DBG_LOW(" no PTE !\n");
+                return 1;
+        }
+
+#ifndef CONFIG_PPC_64K_PAGES
+        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
+#else
+        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
+                pte_val(*(ptep + PTRS_PER_PTE)));
+#endif
+        /* Pre-check access permissions (will be re-checked atomically
+         * in __hash_page_XX but this pre-check is a fast path
+         */
+        if (access & ~pte_val(*ptep)) {
+                DBG_LOW(" no access !\n");
+                return 1;
         }
 
-        return ret;
+        /* Do actual hashing */
+#ifndef CONFIG_PPC_64K_PAGES
+        rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#else
+        if (mmu_virtual_psize == MMU_PAGE_64K)
+                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
+        else
+                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifndef CONFIG_PPC_64K_PAGES
+        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
+#else
+        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
+                pte_val(*(ptep + PTRS_PER_PTE)));
+#endif
+        DBG_LOW(" -> rc=%d\n", rc);
+        return rc;
 }
 
-void flush_hash_page(unsigned long va, pte_t pte, int local)
+void hash_preload(struct mm_struct *mm, unsigned long ea,
+                  unsigned long access, unsigned long trap)
 {
-        unsigned long vpn, hash, secondary, slot;
-        unsigned long huge = pte_huge(pte);
+        unsigned long vsid;
+        void *pgdir;
+        pte_t *ptep;
+        cpumask_t mask;
+        unsigned long flags;
+        int local = 0;
+
+        /* We don't want huge pages prefaulted for now
+         */
+        if (unlikely(in_hugepage_area(mm->context, ea)))
+                return;
+
+        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
+                " trap=%lx\n", mm, mm->pgd, ea, access, trap);
 
-        if (huge)
-                vpn = va >> HPAGE_SHIFT;
+        /* Get PTE, VSID, access mask */
+        pgdir = mm->pgd;
+        if (pgdir == NULL)
+                return;
+        ptep = find_linux_pte(pgdir, ea);
+        if (!ptep)
+                return;
+        vsid = get_vsid(mm->context.id, ea);
+
+        /* Hash it in */
+        local_irq_save(flags);
+        mask = cpumask_of_cpu(smp_processor_id());
+        if (cpus_equal(mm->cpu_vm_mask, mask))
+                local = 1;
+#ifndef CONFIG_PPC_64K_PAGES
+        __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#else
+        if (mmu_virtual_psize == MMU_PAGE_64K)
+                __hash_page_64K(ea, access, vsid, ptep, trap, local);
         else
-                vpn = va >> PAGE_SHIFT;
-        hash = hpt_hash(vpn, huge);
-        secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
-        if (secondary)
-                hash = ~hash;
-        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-        slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
-
-        ppc_md.hpte_invalidate(slot, va, huge, local);
+                __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#endif /* CONFIG_PPC_64K_PAGES */
+        local_irq_restore(flags);
+}
+
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
+{
+        unsigned long hash, index, shift, hidx, slot;
+
+        DBG_LOW("flush_hash_page(va=%016x)\n", va);
+        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+                hash = hpt_hash(va, shift);
+                hidx = __rpte_to_hidx(pte, index);
+                if (hidx & _PTEIDX_SECONDARY)
+                        hash = ~hash;
+                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+                slot += hidx & _PTEIDX_GROUP_IX;
+                DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
+                ppc_md.hpte_invalidate(slot, va, psize, local);
+        } pte_iterate_hashed_end();
 }
 
 void flush_hash_range(unsigned long number, int local)
 {
-        if (ppc_md.flush_hash_range) {
+        if (ppc_md.flush_hash_range)
                 ppc_md.flush_hash_range(number, local);
-        } else {
+        else {
                 int i;
                 struct ppc64_tlb_batch *batch =
                         &__get_cpu_var(ppc64_tlb_batch);
 
                 for (i = 0; i < number; i++)
-                        flush_hash_page(batch->vaddr[i], batch->pte[i], local);
+                        flush_hash_page(batch->vaddr[i], batch->pte[i],
+                                        batch->psize, local);
         }
 }
 
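The reworked flush_hash_page() above derives each HPTE slot from the per-subpage hidx: the secondary bucket is the complement of the primary hash, and the low bits select the entry within an 8-slot group. A minimal standalone sketch of that arithmetic (constants restated locally to mirror the kernel's _PTEIDX_* values; mask and hash values are illustrative):

#include <stdio.h>

#define HPTES_PER_GROUP   8
#define PTEIDX_SECONDARY  0x8   /* mirrors _PTEIDX_SECONDARY */
#define PTEIDX_GROUP_IX   0x7   /* mirrors _PTEIDX_GROUP_IX  */

int main(void)
{
        unsigned long htab_hash_mask = 0xfffff; /* example mask */
        unsigned long hash = 0x12345;           /* example hash value */
        unsigned long hidx = 0xa;               /* secondary group, slot 2 */
        unsigned long slot;

        if (hidx & PTEIDX_SECONDARY)            /* secondary: complement hash */
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & PTEIDX_GROUP_IX;         /* entry within the group */

        printf("slot = 0x%lx\n", slot);
        return 0;
}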
@@ -452,6 +738,18 @@ void __init htab_finish_init(void)
         extern unsigned int *htab_call_hpte_remove;
         extern unsigned int *htab_call_hpte_updatepp;
 
+#ifdef CONFIG_PPC_64K_PAGES
+        extern unsigned int *ht64_call_hpte_insert1;
+        extern unsigned int *ht64_call_hpte_insert2;
+        extern unsigned int *ht64_call_hpte_remove;
+        extern unsigned int *ht64_call_hpte_updatepp;
+
+        make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+        make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+        make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+        make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_64K_PAGES */
+
         make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
         make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
         make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
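Taken together, the patch replaces the old large/!large switch with a table of page-size definitions: htab_bolt_mapping() looks up a shift by psize index and derives both its mapping stride and the virtual page number from that one shift. A standalone sketch of the arithmetic (the two-entry table and the example address are illustrative, not the kernel's actual data):

#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_16M, MMU_PAGE_COUNT };

struct mmu_psize_def { unsigned int shift; };

static const struct mmu_psize_def psize_defs[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K]  = { .shift = 12 },       /* 4KB pages  */
        [MMU_PAGE_16M] = { .shift = 24 },       /* 16MB pages */
};

int main(void)
{
        unsigned long va = 0xc000000001234567UL; /* example address */

        for (int psize = 0; psize < MMU_PAGE_COUNT; psize++) {
                unsigned int shift = psize_defs[psize].shift;
                unsigned long step = 1UL << shift;      /* bolt-map stride  */
                unsigned long vpn = va >> shift;        /* virtual page no. */

                printf("psize %d: shift=%u step=0x%lx vpn=0x%lx\n",
                       psize, shift, step, vpn);
        }
        return 0;
}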