commit     45c091bb2d453ce4a8b06cf19872ec7a77fc4799 (patch)
author     Linus Torvalds <torvalds@g5.osdl.org>  2006-06-23 01:11:30 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-06-23 01:11:30 -0400
tree       06fb2e05518ebfba163f8424e028e7faf5672d66 /arch/powerpc/mm
parent     d588fcbe5a7ba8bba2cebf7799ab2d573717a806 (diff)
parent     2191fe3e39159e3375f4b7ec1420df149f154101 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (139 commits)
[POWERPC] re-enable OProfile for iSeries, using timer interrupt
[POWERPC] support ibm,extended-*-frequency properties
[POWERPC] Extra sanity check in EEH code
[POWERPC] Dont look for class-code in pci children
[POWERPC] Fix mdelay badness on shared processor partitions
[POWERPC] disable floating point exceptions for init
[POWERPC] Unify ppc syscall tables
[POWERPC] mpic: add support for serial mode interrupts
[POWERPC] pseries: Print PCI slot location code on failure
[POWERPC] spufs: one more fix for 64k pages
[POWERPC] spufs: fail spu_create with invalid flags
[POWERPC] spufs: clear class2 interrupt status before wakeup
[POWERPC] spufs: fix Makefile for "make clean"
[POWERPC] spufs: remove stop_code from struct spu
[POWERPC] spufs: fix spu irq affinity setting
[POWERPC] spufs: further abstract priv1 register access
[POWERPC] spufs: split the Cell BE support into generic and platform dependant parts
[POWERPC] spufs: dont try to access SPE channel 1 count
[POWERPC] spufs: use kzalloc in create_spu
[POWERPC] spufs: fix initial state of wbox file
...
Manually resolved conflicts in:
drivers/net/phy/Makefile
include/asm-powerpc/spu.h
Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/hash_low_32.S    | 34
 arch/powerpc/mm/hash_low_64.S    | 31
 arch/powerpc/mm/hash_native_64.c |  2
 arch/powerpc/mm/hash_utils_64.c  | 84
 arch/powerpc/mm/lmb.c            | 43
 arch/powerpc/mm/mem.c            |  6
 arch/powerpc/mm/mmu_context_32.c |  2
 arch/powerpc/mm/mmu_context_64.c |  3
 arch/powerpc/mm/numa.c           |  8
 arch/powerpc/mm/ppc_mmu_32.c     | 16
 arch/powerpc/mm/slb.c            | 32
 arch/powerpc/mm/slb_low.S        | 17
 arch/powerpc/mm/stab.c           |  4
 arch/powerpc/mm/tlb_32.c         |  6
 arch/powerpc/mm/tlb_64.c         |  5
 15 files changed, 185 insertions(+), 108 deletions(-)
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ea469eefa146..94255beeecd3 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -74,12 +74,6 @@ _GLOBAL(hash_page_sync)
  */
 	.text
 _GLOBAL(hash_page)
-#ifdef CONFIG_PPC64BRIDGE
-	mfmsr	r0
-	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
-	MTMSRD(r0)
-	isync
-#endif
 	tophys(r7,0)			/* gets -KERNELBASE into r7 */
 #ifdef CONFIG_SMP
 	addis	r8,r7,mmu_hash_lock@h
@@ -285,7 +279,6 @@ Hash_base = 0xc0180000
 Hash_bits = 12				/* e.g. 256kB hash table */
 Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
-#ifndef CONFIG_PPC64BRIDGE
 /* defines for the PTE format for 32-bit PPCs */
 #define PTE_SIZE	8
 #define PTEG_SIZE	64
@@ -299,21 +292,6 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 #define SET_V(r)	oris r,r,PTE_V@h
 #define CLR_V(r,t)	rlwinm r,r,0,1,31
 
-#else
-/* defines for the PTE format for 64-bit PPCs */
-#define PTE_SIZE	16
-#define PTEG_SIZE	128
-#define LG_PTEG_SIZE	7
-#define LDPTEu		ldu
-#define STPTE		std
-#define CMPPTE		cmpd
-#define PTE_H		2
-#define PTE_V		1
-#define TST_V(r)	andi. r,r,PTE_V
-#define SET_V(r)	ori r,r,PTE_V
-#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
-#endif /* CONFIG_PPC64BRIDGE */
-
 #define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
 #define HASH_RIGHT	31-LG_PTEG_SIZE
 
@@ -331,14 +309,8 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
 
 	/* Construct the high word of the PPC-style PTE (r5) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
 	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
-	sldi	r5,r3,12		/* shift vsid into position */
-	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r5)			/* set V (valid) bit */
 
 	/* Get the address of the primary PTE group in the hash table (r3) */
@@ -516,14 +488,8 @@ _GLOBAL(flush_hash_pages)
 	add	r3,r3,r0		/* note code below trims to 24 bits */
 
 	/* Construct the high word of the PPC-style PTE (r11) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
 	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
-	sldi	r11,r3,12		/* shift vsid into position */
-	rlwimi	r11,r4,16,20,24		/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r11)			/* set V (valid) bit */
 
 #ifdef CONFIG_SMP
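The rotate-and-insert pair that builds the PTE high word packs the VSID and the abbreviated page index (API) into one register. A minimal C model of that packing, under the bit layout the comments above describe (hash_pte_hi is an illustrative name, not a kernel function):

    #include <stdint.h>

    /* Model of "rlwinm r5,r3,7,1,24" + "rlwimi r5,r4,10,26,31": the 24-bit
     * VSID lands in the 0x7fffff80 field, and the API (EA bits 4..9, i.e.
     * the top 6 bits of the page index) lands in the low 6 bits. */
    static uint32_t hash_pte_hi(uint32_t vsid, uint32_t ea)
    {
            uint32_t hi = (vsid << 7) & 0x7fffff80;
            hi |= (ea >> 22) & 0x3f;
            return hi;              /* SET_V() then sets the valid bit */
    }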
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e0d02c4a2615..52e914238959 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -136,6 +136,7 @@ _GLOBAL(__hash_page_4K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -368,6 +369,7 @@ _GLOBAL(__hash_page_4K)
 	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
 	or	r30,r30,r31
 	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+	oris	r30,r30,_PAGE_COMBO@h
 	/* Write the linux PTE atomically (setting busy) */
 	stdcx.	r30,0,r6
 	bne-	1b
@@ -400,6 +402,7 @@ _GLOBAL(__hash_page_4K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -426,6 +429,14 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	andi.	r0,r31,_PAGE_HASHPTE
 	li	r26,0			/* Default hidx */
 	beq	htab_insert_pte
+
+	/*
+	 * Check if the pte was already inserted into the hash table
+	 * as a 64k HW page, and invalidate the 64k HPTE if so.
+	 */
+	andis.	r0,r31,_PAGE_COMBO@h
+	beq	htab_inval_old_hpte
+
 	ld	r6,STK_PARM(r6)(r1)
 	ori	r26,r6,0x8000		/* Load the hidx mask */
 	ld	r26,0(r26)
@@ -496,6 +507,19 @@ _GLOBAL(htab_call_hpte_remove)
 	/* Try all again */
 	b	htab_insert_pte
 
+	/*
+	 * Call out to C code to invalidate an 64k HW HPTE that is
+	 * useless now that the segment has been switched to 4k pages.
+	 */
+htab_inval_old_hpte:
+	mr	r3,r29			/* virtual addr */
+	mr	r4,r31			/* PTE.pte */
+	li	r5,0			/* PTE.hidx */
+	li	r6,MMU_PAGE_64K		/* psize */
+	ld	r7,STK_PARM(r8)(r1)	/* local */
+	bl	.flush_hash_page
+	b	htab_insert_pte
+
 htab_bail_ok:
 	li	r3,0
 	b	htab_bail
@@ -636,6 +660,12 @@ _GLOBAL(__hash_page_64K)
 	 * is changing this PTE anyway and might hash it.
 	 */
 	bne-	ht64_bail_ok
+BEGIN_FTR_SECTION
+	/* Check if PTE has the cache-inhibit bit set */
+	andi.	r0,r31,_PAGE_NO_CACHE
+	/* If so, bail out and refault as a 4k page */
+	bne-	ht64_bail_ok
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY,HASHPTE and ACCESSED)
 	 */
@@ -671,6 +701,7 @@ _GLOBAL(__hash_page_64K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
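Between the new _PAGE_COMBO marking and the htab_inval_old_hpte path, the 4K hunks implement a demote-and-reinsert flow. A standalone sketch of that control flow (all names here are local stand-ins; the real invalidation is the .flush_hash_page call above):

    #define PTE_HASHPTE 0x1000UL    /* placeholder bit values */
    #define PTE_COMBO   0x2000UL

    static void invalidate_64k_hpte(unsigned long va) { (void)va; /* stub */ }

    static void hash_4k_subpage(unsigned long va, unsigned long pte)
    {
            /* Hashed before, but not as 4k subpages: the old 64k HPTE
             * is stale now that this segment demoted to 4k pages. */
            if ((pte & PTE_HASHPTE) && !(pte & PTE_COMBO))
                    invalidate_64k_hpte(va);
            /* ...then insert the 4k HPTE and set PTE_COMBO in the PTE */
    }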
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 994856e55b7c..a0f3cbd00d39 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -238,7 +238,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N));
+			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
 		native_unlock_hpte(hptep);
 	}
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c006d9039633..d03fd2b4445e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,10 +92,15 @@ unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
@@ -308,20 +313,31 @@ static void __init htab_init_page_sizes(void)
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_linear_psize = MMU_PAGE_1M;
 
+#ifdef CONFIG_PPC_64K_PAGES
 	/*
 	 * Pick a size for the ordinary pages. Default is 4K, we support
-	 * 64K if cache inhibited large pages are supported by the
-	 * processor
+	 * 64K for user mappings and vmalloc if supported by the processor.
+	 * We only use 64k for ioremap if the processor
+	 * (and firmware) support cache-inhibited large pages.
+	 * If not, we use 4k and set mmu_ci_restrictions so that
+	 * hash_page knows to switch processes that use cache-inhibited
+	 * mappings to 4k pages.
 	 */
-#ifdef CONFIG_PPC_64K_PAGES
-	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
-	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
 		mmu_virtual_psize = MMU_PAGE_64K;
+		mmu_vmalloc_psize = MMU_PAGE_64K;
+		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+			mmu_io_psize = MMU_PAGE_64K;
+		else
+			mmu_ci_restrictions = 1;
+	}
 #endif
 
-	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+	       "virtual = %d, io = %d\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
-	       mmu_psize_defs[mmu_virtual_psize].shift);
+	       mmu_psize_defs[mmu_virtual_psize].shift,
+	       mmu_psize_defs[mmu_io_psize].shift);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
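Flattened out, the new selection policy reads as below (a restatement with the globals modeled as locals; the MMU_PAGE_* index values here are illustrative):

    enum { MMU_PAGE_4K, MMU_PAGE_64K };     /* illustrative indices */

    static int pick_page_sizes(int have_64k, int ci_large_pages)
    {
            int virtual_psize = MMU_PAGE_4K;        /* user pages */
            int vmalloc_psize = MMU_PAGE_4K;        /* vmalloc segment */
            int io_psize      = MMU_PAGE_4K;        /* ioremap segment */
            int ci_restrictions = 0;

            if (have_64k) {
                    virtual_psize = MMU_PAGE_64K;
                    vmalloc_psize = MMU_PAGE_64K;
                    if (ci_large_pages)
                            io_psize = MMU_PAGE_64K;
                    else
                            ci_restrictions = 1;    /* demote CI users at fault time */
            }
            /* the kernel publishes these via mmu_*_psize / mmu_ci_restrictions */
            return virtual_psize + vmalloc_psize + io_psize + ci_restrictions;
    }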
@@ -556,6 +572,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
+	int psize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -575,10 +592,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			return 1;
 		}
 		vsid = get_vsid(mm->context.id, ea);
+		psize = mm->context.user_psize;
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
 		break;
 	default:
 		/* Not a valid range
@@ -629,7 +651,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
 	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_virtual_psize == MMU_PAGE_64K)
+	if (mmu_ci_restrictions) {
+		/* If this PTE is non-cacheable, switch to 4k */
+		if (psize == MMU_PAGE_64K &&
+		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+			if (user_region) {
+				psize = MMU_PAGE_4K;
+				mm->context.user_psize = MMU_PAGE_4K;
+				mm->context.sllp = SLB_VSID_USER |
+					mmu_psize_defs[MMU_PAGE_4K].sllp;
+			} else if (ea < VMALLOC_END) {
+				/*
+				 * some driver did a non-cacheable mapping
+				 * in vmalloc space, so switch vmalloc
+				 * to 4k pages
+				 */
+				printk(KERN_ALERT "Reducing vmalloc segment "
+				       "to 4kB pages because of "
+				       "non-cacheable mapping\n");
+				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+			}
+		}
+		if (user_region) {
+			if (psize != get_paca()->context.user_psize) {
+				get_paca()->context = mm->context;
+				slb_flush_and_rebolt();
+			}
+		} else if (get_paca()->vmalloc_sllp !=
+			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+			get_paca()->vmalloc_sllp =
+				mmu_psize_defs[mmu_vmalloc_psize].sllp;
+			slb_flush_and_rebolt();
+		}
+	}
+	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
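The demotion above happens at two scopes: a non-cacheable user mapping demotes only the faulting mm, while one in vmalloc space demotes that segment for everyone. Either way the bolted SLB entries then carry stale page-size (LLP) bits, hence slb_flush_and_rebolt(); the kernel skips the rebolt when the paca's cached values already match. A standalone model of the policy (types are local stand-ins):

    enum psize { P4K, P64K };

    struct seg_sizes { enum psize user, vmalloc; };

    static void demote_on_ci_fault(struct seg_sizes *s, int user_region,
                                   int pte_is_noncacheable, int *rebolt_slb)
    {
            if (!pte_is_noncacheable)
                    return;
            if (user_region && s->user == P64K) {
                    s->user = P4K;          /* per-mm: only this process pays */
                    *rebolt_slb = 1;
            } else if (!user_region && s->vmalloc == P64K) {
                    s->vmalloc = P4K;       /* global: whole vmalloc segment */
                    *rebolt_slb = 1;
            }
    }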
@@ -681,7 +736,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
 	__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_virtual_psize == MMU_PAGE_64K)
+	if (mmu_ci_restrictions) {
+		/* If this PTE is non-cacheable, switch to 4k */
+		if (mm->context.user_psize == MMU_PAGE_64K &&
+		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+			mm->context.user_psize = MMU_PAGE_4K;
+			mm->context.sllp = SLB_VSID_USER |
+				mmu_psize_defs[MMU_PAGE_4K].sllp;
+			get_paca()->context = mm->context;
+			slb_flush_and_rebolt();
+		}
+	}
+	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		__hash_page_4K(ea, access, vsid, ptep, trap, local);
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 417d58518558..8b6f522655a6 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -89,20 +89,25 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
-		unsigned long r1, unsigned long r2)
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
 	unsigned long i;
 
-	rgn->region[r1].size += rgn->region[r2].size;
-	for (i=r2; i < rgn->cnt-1; i++) {
-		rgn->region[i].base = rgn->region[i+1].base;
-		rgn->region[i].size = rgn->region[i+1].size;
+	for (i = r; i < rgn->cnt - 1; i++) {
+		rgn->region[i].base = rgn->region[i + 1].base;
+		rgn->region[i].size = rgn->region[i + 1].size;
 	}
 	rgn->cnt--;
 }
 
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	rgn->region[r1].size += rgn->region[r2].size;
+	lmb_remove_region(rgn, r2);
+}
+
 /* This routine called with relocation disabled. */
 void __init lmb_init(void)
 {
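lmb_remove_region() is factored out here so that lmb_coalesce_regions() and the reserve-truncation loop added further down can share the deletion code. The helper in standalone form (struct name is illustrative):

    struct region { unsigned long base, size; };

    /* Delete entry r by sliding the array tail down one slot. */
    static void remove_region(struct region *reg, unsigned long *cnt,
                              unsigned long r)
    {
            unsigned long i;

            for (i = r; i < *cnt - 1; i++)
                    reg[i] = reg[i + 1];
            (*cnt)--;
    }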
@@ -294,17 +299,16 @@ unsigned long __init lmb_end_of_DRAM(void)
 	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
 }
 
-/*
- * Truncate the lmb list to memory_limit if it's set
- * You must call lmb_analyze() after this.
- */
+/* You must call lmb_analyze() after this. */
 void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 {
 	unsigned long i, limit;
+	struct lmb_property *p;
 
 	if (! memory_limit)
 		return;
 
+	/* Truncate the lmb regions to satisfy the memory limit. */
 	limit = memory_limit;
 	for (i = 0; i < lmb.memory.cnt; i++) {
 		if (limit > lmb.memory.region[i].size) {
@@ -316,4 +320,21 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 			lmb.memory.cnt = i + 1;
 			break;
 		}
+
+	lmb.rmo_size = lmb.memory.region[0].size;
+
+	/* And truncate any reserves above the limit also. */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		p = &lmb.reserved.region[i];
+
+		if (p->base > memory_limit)
+			p->size = 0;
+		else if ((p->base + p->size) > memory_limit)
+			p->size = memory_limit - p->base;
+
+		if (p->size == 0) {
+			lmb_remove_region(&lmb.reserved, i);
+			i--;
+		}
+	}
 }
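The subtle point in the new reserve-truncation loop is the i-- after a removal: lmb_remove_region() slides the tail down, so slot i now holds an entry that has not been checked yet. Reusing the standalone helper sketched above:

    static void clip_reserves(struct region *res, unsigned long *cnt,
                              unsigned long limit)
    {
            unsigned long i;

            for (i = 0; i < *cnt; i++) {
                    if (res[i].base > limit)
                            res[i].size = 0;
                    else if (res[i].base + res[i].size > limit)
                            res[i].size = limit - res[i].base;

                    if (res[i].size == 0) {
                            remove_region(res, cnt, i);
                            i--;    /* re-examine the slot just filled */
                    }
            }
    }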
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 741dd8802d49..69f3b9a20beb 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -299,9 +299,9 @@ void __init paging_init(void)
 	kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
 
-	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
-	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 	/*
 	 * All pages are DMA-able so we put them all in the DMA zone.
@@ -380,7 +380,7 @@ void __init mem_init(void)
 			totalhigh_pages++;
 		}
 		totalram_pages += totalhigh_pages;
-		printk(KERN_INFO "High memory: %luk\n",
+		printk(KERN_DEBUG "High memory: %luk\n",
 		       totalhigh_pages << (PAGE_SHIFT-10));
 	}
 #endif /* CONFIG_HIGHMEM */
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index a8816e0f6a86..e326e4249e1a 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -30,7 +30,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 #ifdef FEW_CONTEXTS
 atomic_t nr_free_contexts;
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 714a84dd8d5d..65d18dca266f 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -49,6 +49,9 @@ again:
 	}
 
 	mm->context.id = index;
+	mm->context.user_psize = mmu_virtual_psize;
+	mm->context.sllp = SLB_VSID_USER |
+		mmu_psize_defs[mmu_virtual_psize].sllp;
 
 	return 0;
 }
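Each new context now carries its base page size plus a precomputed "sllp" (the user VSID flags with the segment's L/LP page-size encoding), so the SLB miss handler can fetch the encoding with a single load instead of recomputing it. Modeled with illustrative types:

    struct model_context {
            int user_psize;         /* MMU_PAGE_* index */
            unsigned int sllp;      /* user VSID flag | L,LP encoding */
    };

    static void init_context_psize(struct model_context *ctx, int psize,
                                   unsigned int user_flag,
                                   unsigned int psize_sllp)
    {
            ctx->user_psize = psize;
            ctx->sllp = user_flag | psize_sllp;
    }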
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 092355f37399..aa98cb3b59d8 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -487,9 +487,9 @@ static void __init setup_nonnuma(void)
 	unsigned long total_ram = lmb_phys_mem_size();
 	unsigned int i;
 
-	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
-	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
 	for (i = 0; i < lmb.memory.cnt; ++i)
@@ -507,7 +507,7 @@ void __init dump_numa_cpu_topology(void)
 		return;
 
 	for_each_online_node(node) {
-		printk(KERN_INFO "Node %d CPUs:", node);
+		printk(KERN_DEBUG "Node %d CPUs:", node);
 
 		count = 0;
 		/*
@@ -543,7 +543,7 @@ static void __init dump_numa_memory_topology(void)
 	for_each_online_node(node) {
 		unsigned long i;
 
-		printk(KERN_INFO "Node %d Memory:", node);
+		printk(KERN_DEBUG "Node %d Memory:", node);
 
 		count = 0;
 
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ed7fcfe5fd37..2ed43a493b31 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -42,18 +42,14 @@ unsigned long _SDR1;
 
 union ubat {			/* BAT register values to be loaded */
 	BAT	bat;
-#ifdef CONFIG_PPC64BRIDGE
-	u64	word[2];
-#else
 	u32	word[2];
-#endif
-} BATS[4][2];			/* 4 pairs of IBAT, DBAT */
+} BATS[8][2];			/* 8 pairs of IBAT, DBAT */
 
 struct batrange {		/* stores address ranges mapped by BATs */
 	unsigned long start;
 	unsigned long limit;
 	unsigned long phys;
-} bat_addrs[4];
+} bat_addrs[8];
 
 /*
  * Return PA for this VA if it is mapped by a BAT, or 0
@@ -190,7 +186,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 		return;
 	pmd = pmd_offset(pgd_offset(mm, ea), ea);
 	if (!pmd_none(*pmd))
-		add_hash_page(mm->context, ea, pmd_val(*pmd));
+		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
 
 /*
@@ -220,15 +216,9 @@ void __init MMU_init_hw(void)
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
 
-#ifdef CONFIG_PPC64BRIDGE
-#define LG_HPTEG_SIZE	7		/* 128 bytes per HPTEG */
-#define SDR1_LOW_BITS	(lg_n_hpteg - 11)
-#define MIN_N_HPTEG	2048		/* min 256kB hash table */
-#else
 #define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
 #define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
 #define MIN_N_HPTEG	1024		/* min 64kB hash table */
-#endif
 
 	/*
 	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ffc8ed4de62d..6a8bf6c6000e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
 		     : "memory" );
 }
 
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
-	unsigned long linear_llp, virtual_llp, lflags, vflags;
+	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data;
 
 	WARN_ON(!irqs_disabled());
 
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
 	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -122,9 +122,6 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
 
 	/*
 	 * preload some userspace segments into the SLB.
@@ -167,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 
 void slb_initialize(void)
 {
-	unsigned long linear_llp, virtual_llp;
+	unsigned long linear_llp, vmalloc_llp, io_llp;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_virtual;
-	extern unsigned int *slb_miss_user_load_normal;
+	extern unsigned int *slb_miss_kernel_load_io;
 #ifdef CONFIG_HUGETLB_PAGE
 	extern unsigned int *slb_miss_user_load_huge;
 	unsigned long huge_llp;
@@ -181,18 +177,19 @@ void slb_initialize(void)
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
 				   SLB_VSID_KERNEL | linear_llp);
-		patch_slb_encoding(slb_miss_kernel_load_virtual,
-				   SLB_VSID_KERNEL | virtual_llp);
-		patch_slb_encoding(slb_miss_user_load_normal,
-				   SLB_VSID_USER | virtual_llp);
+		patch_slb_encoding(slb_miss_kernel_load_io,
+				   SLB_VSID_KERNEL | io_llp);
 
 		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+		DBG("SLB: io LLP = %04x\n", io_llp);
 #ifdef CONFIG_HUGETLB_PAGE
 		patch_slb_encoding(slb_miss_user_load_huge,
 				   SLB_VSID_USER | huge_llp);
@@ -207,7 +204,7 @@ void slb_initialize(void)
 	unsigned long lflags, vflags;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
 	asm volatile("isync":::"memory");
@@ -215,7 +212,6 @@ void slb_initialize(void)
 	asm volatile("isync; slbia; isync":::"memory");
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
-	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
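patch_slb_encoding() (unchanged by this merge) is what makes the slb_miss_kernel_load_* symbols work: each marks a "li r11,0" placeholder in the miss handler whose 16-bit immediate is rewritten at boot with the chosen VSID flags. A conceptual sketch of the idea:

    /* Rewrite the 16-bit immediate field of an existing "li rD,imm". */
    static void patch_li_immediate(unsigned int *insn, unsigned int imm16)
    {
            *insn = (*insn & 0xffff0000) | (imm16 & 0xffff);
            /* the real kernel must also flush the icache for this address */
    }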
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667bf..8548dcf8ef8b 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
 	b	slb_finish_load
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
 	 */
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+	/* check whether this is in vmalloc or ioremap space */
+	clrldi	r11,r10,48
+	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
+	bgt	5f
+	lhz	r11,PACAVMALLOCSLLP(r13)
+	b	slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 	b	slb_finish_load
 
@@ -96,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
 1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-_GLOBAL(slb_miss_user_load_normal)
-	li	r11,0
-
+	lhz	r11,PACACONTEXTSLLP(r13)
 2:
 	ld	r9,PACACONTEXTID(r13)
 	rldimi	r10,r9,USER_ESID_BITS,0
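The new feature section splits the old single "kernel virtual" case in two. A C model of the test, assuming (as the surrounding handler suggests) that r10 holds the ESID, i.e. the effective address shifted right by 28:

    /* vmalloc occupies the low ESIDs of the kernel-virtual region; anything
     * above (VMALLOC_SIZE >> 28) - 1 is ioremap space and takes the patched
     * io encoding instead of the paca's cached vmalloc sllp. */
    static int esid_is_vmalloc(unsigned long esid, unsigned long vmalloc_size)
    {
            unsigned long off = esid & 0xffff;      /* clrldi r11,r10,48 */
            return off <= (vmalloc_size >> 28) - 1; /* cmpldi ; bgt 5f */
    }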
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 4a9291d9fef8..691320c90b78 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -200,10 +200,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 
 	__get_cpu_var(stab_cache_ptr) = 0;
 
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
-
 	/* Now preload some entries for the new task */
 	if (test_tsk_thread_flag(tsk, TIF_32BIT))
 		unmapped_base = TASK_UNMAPPED_BASE_USER32;
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index ad580f3742e5..02eb23e036d5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -42,7 +42,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 
 	if (Hash != 0) {
 		ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(mm->context, addr, ptephys, 1);
+		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
 
@@ -102,7 +102,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	pmd_t *pmd;
 	unsigned long pmd_end;
 	int count;
-	unsigned int ctx = mm->context;
+	unsigned int ctx = mm->context.id;
 
 	if (Hash == 0) {
 		_tlbia();
@@ -172,7 +172,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
 	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
 	if (!pmd_none(*pmd))
-		flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 	FINISH_FLUSH;
 }
 
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index f734b11566c2..e7449b068c82 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -131,7 +131,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 {
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid;
-	unsigned int psize = mmu_virtual_psize;
+	unsigned int psize;
 	int i;
 
 	i = batch->index;
@@ -148,7 +148,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 #else
 		BUG();
 #endif
-	}
+	} else
+		psize = pte_pagesize_index(pte);
 
 	/*
 	 * This can happen when we are in the middle of a TLB batch and
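With per-mm page sizes, the batch code can no longer assume the global mmu_virtual_psize; pte_pagesize_index() (defined in the page-table headers) derives the size from the PTE itself. Under the scheme this merge introduces it keys off _PAGE_COMBO; a sketch of the idea, not the header's exact text:

    /* A 64k-segment PTE that was demoted carries _PAGE_COMBO and is
     * hashed as 4k subpages; otherwise it is a true 64k HW page. */
    static inline unsigned int sketch_pte_psize(unsigned long pte)
    {
            return (pte & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K;
    }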