author		Ilya Yanok <yanok@emcraft.com>		2008-12-10 20:55:41 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-28 17:53:25 -0500
commit		ca9153a3a2a7556d091dfe080e42b0e67881fff6 (patch)
tree		35b5ce24f190690cf7a726cbb97980da51704855
parent		6ca4f7494bde078b2b730e28e4ea1dc36a772f70 (diff)
powerpc/44x: Support 16K/64K base page sizes on 44x
This adds support for 16k and 64k page sizes on PowerPC 44x processors.

The PGDIR table is much smaller than a page when using 16k or 64k
pages (512 and 32 bytes respectively) so we allocate the PGDIR with
kzalloc() instead of __get_free_pages().

One PTE table covers rather a large memory area when using 16k or 64k
pages (32MB or 512MB respectively), so we can easily put FIXMAP and
PKMAP in the area covered by one PTE table.

Signed-off-by: Yuri Tikhonov <yur@emcraft.com>
Signed-off-by: Vladimir Panfilov <pvr@emcraft.com>
Signed-off-by: Ilya Yanok <yanok@emcraft.com>
Acked-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
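
The 512- and 32-byte PGDIR sizes and the 32MB/512MB per-PTE-table coverage
quoted above follow directly from the 32-bit address split with 8-byte
(CONFIG_PTE_64BIT) PTEs and 4-byte pgd entries used on 44x. A small
stand-alone C sketch of that arithmetic, illustration only and not part of
the patch:

/*
 * Back-of-the-envelope check of the numbers in the commit message,
 * assuming a 32-bit virtual address space, 8-byte PTEs and 4-byte
 * pgd entries.  Illustration only.
 */
#include <stdio.h>

int main(void)
{
	const int page_shifts[] = { 14, 16 };	/* 16k and 64k pages */

	for (int i = 0; i < 2; i++) {
		int page_shift = page_shifts[i];
		int pte_shift = page_shift - 3;		/* 2^pte_shift PTEs per page */
		int pgdir_shift = page_shift + pte_shift;
		unsigned long covered = 1UL << pgdir_shift;	/* span of one PTE table */
		unsigned long pgdir_bytes = (1UL << (32 - pgdir_shift)) * 4;

		printf("%2dk pages: PGDIR = %3lu bytes, one PTE table maps %3lu MB\n",
		       1 << (page_shift - 10), pgdir_bytes, covered >> 20);
	}
	return 0;	/* prints 512 bytes / 32 MB and 32 bytes / 512 MB */
}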
-rw-r--r--	arch/powerpc/Kconfig			58
-rw-r--r--	arch/powerpc/include/asm/highmem.h	19
-rw-r--r--	arch/powerpc/include/asm/mmu-44x.h	17
-rw-r--r--	arch/powerpc/include/asm/page.h		13
-rw-r--r--	arch/powerpc/include/asm/page_32.h	7
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	4
-rw-r--r--	arch/powerpc/kernel/head_44x.S		23
-rw-r--r--	arch/powerpc/kernel/misc_32.S		12
-rw-r--r--	arch/powerpc/mm/pgtable_32.c		23
-rw-r--r--	arch/powerpc/platforms/Kconfig.cputype	2
10 files changed, 130 insertions, 48 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f7f5448f863d..1af22579e3d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -405,23 +405,53 @@ config PPC_HAS_HASH_64K
 	depends on PPC64
 	default n
 
-config PPC_64K_PAGES
-	bool "64k page size"
-	depends on PPC64
-	select PPC_HAS_HASH_64K
+choice
+	prompt "Page size"
+	default PPC_4K_PAGES
 	help
-	  This option changes the kernel logical page size to 64k. On machines
-	  without processor support for 64k pages, the kernel will simulate
-	  them by loading each individual 4k page on demand transparently,
-	  while on hardware with such support, it will be used to map
-	  normal application pages.
+	  Select the kernel logical page size. Increasing the page size
+	  will reduce software overhead at each page boundary, allow
+	  hardware prefetch mechanisms to be more effective, and allow
+	  larger dma transfers increasing IO efficiency and reducing
+	  overhead. However the utilization of memory will increase.
+	  For example, each cached file will using a multiple of the
+	  page size to hold its contents and the difference between the
+	  end of file and the end of page is wasted.
+
+	  Some dedicated systems, such as software raid serving with
+	  accelerated calculations, have shown significant increases.
+
+	  If you configure a 64 bit kernel for 64k pages but the
+	  processor does not support them, then the kernel will simulate
+	  them with 4k pages, loading them on demand, but with the
+	  reduced software overhead and larger internal fragmentation.
+	  For the 32 bit kernel, a large page option will not be offered
+	  unless it is supported by the configured processor.
+
+	  If unsure, choose 4K_PAGES.
+
+config PPC_4K_PAGES
+	bool "4k page size"
+
+config PPC_16K_PAGES
+	bool "16k page size" if 44x
+
+config PPC_64K_PAGES
+	bool "64k page size" if 44x || PPC_STD_MMU_64
+	select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+
+endchoice
 
 config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
-	range 9 64 if PPC_64K_PAGES
-	default "9" if PPC_64K_PAGES
-	range 13 64 if PPC64 && !PPC_64K_PAGES
-	default "13" if PPC64 && !PPC_64K_PAGES
+	range 9 64 if PPC_STD_MMU_64 && PPC_64K_PAGES
+	default "9" if PPC_STD_MMU_64 && PPC_64K_PAGES
+	range 13 64 if PPC_STD_MMU_64 && !PPC_64K_PAGES
+	default "13" if PPC_STD_MMU_64 && !PPC_64K_PAGES
+	range 9 64 if PPC_STD_MMU_32 && PPC_16K_PAGES
+	default "9" if PPC_STD_MMU_32 && PPC_16K_PAGES
+	range 7 64 if PPC_STD_MMU_32 && PPC_64K_PAGES
+	default "7" if PPC_STD_MMU_32 && PPC_64K_PAGES
 	range 11 64
 	default "11"
 	help
@@ -441,7 +471,7 @@ config FORCE_MAX_ZONEORDER
 
 config PPC_SUBPAGE_PROT
 	bool "Support setting protections for 4k subpages"
-	depends on PPC_64K_PAGES
+	depends on PPC_STD_MMU_64 && PPC_64K_PAGES
 	help
 	  This option adds support for a system call to allow user programs
 	  to set access permissions (read/write, readonly, or no access)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index fd97e501aa6a..04e4a620952e 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
-#define LAST_PKMAP	(1 << PTE_SHIFT)
-#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
+/*
+ * We use one full pte table with 4K pages. And with 16K/64K pages pte
+ * table covers enough memory (32MB and 512MB resp.) that both FIXMAP
+ * and PKMAP can be placed in single pte table. We use 1024 pages for
+ * PKMAP in case of 16K/64K pages.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER	PTE_SHIFT
+#else
+#define PKMAP_ORDER	10
+#endif
+#define LAST_PKMAP	(1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
 #define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
 #define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
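
A quick compile-time check, our own sketch rather than part of the patch,
that the fixed 1024-page PKMAP window chosen above fits inside the region
mapped by a single PTE table for both large page sizes (8-byte PTEs
assumed, as on 44x), leaving plenty of room for the much smaller FIXMAP:

/* Illustration only: PKMAP_ORDER is fixed at 10 for the large page sizes. */
#define MB(x)		((x) * 1024UL * 1024UL)
#define PKMAP_PAGES	1024UL

/* 16k pages: one PTE table maps 2^(14-3) * 16 KiB = 32 MiB */
_Static_assert(PKMAP_PAGES * (16UL << 10) <= MB(32), "16k PKMAP fits");
/* 64k pages: one PTE table maps 2^(16-3) * 64 KiB = 512 MiB */
_Static_assert(PKMAP_PAGES * (64UL << 10) <= MB(512), "64k PKMAP fits");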
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index b21af32ac6d6..8a97cfb08b7e 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -4,6 +4,8 @@
  * PPC440 support
  */
 
+#include <asm/page.h>
+
 #define PPC44x_MMUCR_TID	0x000000ff
 #define PPC44x_MMUCR_STS	0x00010000
 
@@ -74,4 +76,19 @@ typedef struct {
 /* Size of the TLBs used for pinning in lowmem */
 #define PPC_PIN_SIZE	(1 << 28)	/* 256M */
 
+#if (PAGE_SHIFT == 12)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#elif (PAGE_SHIFT == 14)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#elif (PAGE_SHIFT == 16)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT	(32 - PTE_T_LOG2 - PTE_SHIFT)
+
 #endif /* _ASM_POWERPC_MMU_44X_H_ */
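
The four PPC44x_* macros above generalize shift/mask constants that
head_44x.S previously hard-coded. As a sanity sketch (our own, assuming
the usual 32-bit split PGDIR_SHIFT = PAGE_SHIFT + PTE_SHIFT with 4-byte
pgd entries and 8-byte PTEs), they reduce to the old values 13, 19, 23
and 20 when PAGE_SHIFT is 12:

/* Compile-time illustration only, not part of the patch. */
enum {
	page_shift  = 12,			/* 4k pages */
	pgd_t_log2  = 2,			/* sizeof(pgd_t) == 4 */
	pte_t_log2  = 3,			/* sizeof(pte_t) == 8 (CONFIG_PTE_64BIT) */
	pte_shift   = page_shift - pte_t_log2,	/* 9: 512 PTEs per page */
	pgdir_shift = page_shift + pte_shift,	/* 21 */
};

_Static_assert(32 - pgdir_shift + pgd_t_log2 == 13, "PPC44x_PGD_OFF_SHIFT");
_Static_assert(pgdir_shift - pgd_t_log2 == 19, "PPC44x_PGD_OFF_MASK_BIT");
_Static_assert(32 - pgdir_shift + pte_shift + pte_t_log2 == 23, "PPC44x_PTE_ADD_SHIFT");
_Static_assert(32 - pte_t_log2 - pte_shift == 20, "PPC44x_PTE_ADD_MASK_BIT");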
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index c0b8d4a29a91..197d569f5bd3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,15 @@
 #include <asm/kdump.h>
 
 /*
- * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
+ * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages
+ * on PPC44x). For PPC64 we support either 4K or 64K software
  * page size. When using 64K pages however, whether we are really supporting
  * 64K pages in HW or not is irrelevant to those definitions.
  */
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES)
 #define PAGE_SHIFT		16
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PAGE_SHIFT		14
 #else
 #define PAGE_SHIFT		12
 #endif
@@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t;
 /* 64k pages additionally define a bigger "real PTE" type that gathers
  * the "second half" part of the PTE for pseudo 64k pages
  */
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
 typedef struct { pte_t pte; } real_pte_t;
@@ -191,10 +194,10 @@ typedef pte_basic_t pte_t;
 #define pte_val(x)	(x)
 #define __pte(x)	(x)
 
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
-typedef unsigned long real_pte_t;
+typedef pte_t real_pte_t;
 #endif
 
 
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d77072a32cc6..1458d9500381 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,6 +19,8 @@
 #define PTE_FLAGS_OFFSET	0
 #endif
 
+#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
+
 #ifndef __ASSEMBLY__
 /*
  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
@@ -26,10 +28,8 @@
  */
 #ifdef CONFIG_PTE_64BIT
 typedef unsigned long long pte_basic_t;
-#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
 #else
 typedef unsigned long pte_basic_t;
-#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
 #endif
 
 struct page;
@@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from);
 
 #include <asm-generic/page.h>
 
+#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PAGE_32_H */
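
PGD_T_LOG2 and PTE_T_LOG2 rely on __builtin_ffs() of a power-of-two size
being one greater than its base-2 logarithm. A tiny stand-alone check,
illustration only:

#include <assert.h>

int main(void)
{
	/* For a power of two x, __builtin_ffs(x) - 1 == log2(x). */
	assert(__builtin_ffs(4) - 1 == 2);	/* PGD_T_LOG2 for a 4-byte pgd_t */
	assert(__builtin_ffs(8) - 1 == 3);	/* PTE_T_LOG2 for an 8-byte pte_t */
	return 0;
}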
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c05ab1d3e620..661d07d2146b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -380,6 +380,10 @@ int main(void)
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif
+#ifdef CONFIG_44x
+	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
+	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
+#endif
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index bd4fe9e7278b..b56fecc93a16 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -402,12 +402,14 @@ interrupt_base:
 	rlwimi	r13,r12,10,30,30
 
 	/* Load the PTE */
-	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
 	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 	beq	2f			/* Bail if no table */
 
-	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
+	/* Compute pte address */
+	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
 	lwz	r11, 0(r12)		/* Get high word of pte entry */
 	lwz	r12, 4(r12)		/* Get low word of pte entry */
 
@@ -496,12 +498,14 @@ tlb_44x_patch_hwater_D:
 	/* Make up the required permissions */
 	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
 
-	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
 	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 	beq	2f			/* Bail if no table */
 
-	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
+	/* Compute pte address */
+	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
 	lwz	r11, 0(r12)		/* Get high word of pte entry */
 	lwz	r12, 4(r12)		/* Get low word of pte entry */
 
@@ -565,15 +569,16 @@ tlb_44x_patch_hwater_I:
  */
 finish_tlb_load:
 	/* Combine RPN & ERPN an write WS 0 */
-	rlwimi	r11,r12,0,0,19
+	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 	tlbwe	r11,r13,PPC44x_TLB_XLAT
 
 	/*
 	 * Create WS1. This is the faulting address (EPN),
 	 * page size, and valid flag.
 	 */
-	li	r11,PPC44x_TLB_VALID | PPC44x_TLB_4K
-	rlwimi	r10,r11,0,20,31			/* Insert valid and page size*/
+	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
+	/* Insert valid and page size */
+	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
 	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */
 
 	/* And WS 2 */
@@ -645,12 +650,12 @@ _GLOBAL(set_context)
  * goes at the beginning of the data segment, which is page-aligned.
  */
 	.data
-	.align	12
+	.align	PAGE_SHIFT
 	.globl	sdata
 sdata:
 	.globl	empty_zero_page
 empty_zero_page:
-	.space	4096
+	.space	PAGE_SIZE
 
 /*
  * To support >32-bit physical addresses, we use an 8KB pgdir.
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index ae0d084b6a24..15f28e0de78d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -426,8 +426,8 @@ _GLOBAL(__flush_dcache_icache)
 BEGIN_FTR_SECTION
 	blr
 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
-	rlwinm	r3,r3,0,0,19			/* Get page base address */
-	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
+	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
+	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
 	mr	r6,r3
 0:	dcbst	0,r3				/* Write line to ram */
@@ -467,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	rlwinm	r0,r10,0,28,26			/* clear DR */
 	mtmsr	r0
 	isync
-	rlwinm	r3,r3,0,0,19			/* Get page base address */
-	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
+	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
+	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
 	mr	r6,r3
 0:	dcbst	0,r3				/* Write line to ram */
@@ -492,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  * void clear_pages(void *page, int order) ;
  */
 _GLOBAL(clear_pages)
-	li	r0,4096/L1_CACHE_BYTES
+	li	r0,PAGE_SIZE/L1_CACHE_BYTES
 	slw	r0,r0,r4
 	mtctr	r0
 #ifdef CONFIG_8xx
@@ -550,7 +550,7 @@ _GLOBAL(copy_page)
 	dcbt	r5,r4
 	li	r11,L1_CACHE_BYTES+4
 #endif /* MAX_COPY_PREFETCH */
-	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
 	crclr	4*cr0+eq
 2:
 	mtctr	r0
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8cba46fc9e3b..38ff35f2142a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -68,24 +68,29 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
 #define p_mapped_by_tlbcam(x)	(0UL)
 #endif /* HAVE_TLBCAM */
 
-#ifdef CONFIG_PTE_64BIT
-/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
-#define PGDIR_ORDER	1
-#else
-#define PGDIR_ORDER	0
-#endif
+#define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret;
 
-	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+	/* pgdir take page or two with 4K pages and a page fraction otherwise */
+#ifndef CONFIG_PPC_4K_PAGES
+	ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+#else
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+			PGDIR_ORDER - PAGE_SHIFT);
+#endif
 	return ret;
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	free_pages((unsigned long)pgd, PGDIR_ORDER);
+#ifndef CONFIG_PPC_4K_PAGES
+	kfree((void *)pgd);
+#else
+	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
+#endif
 }
 
 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
@@ -385,7 +390,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 static int fixmaps;
-unsigned long FIXADDR_TOP = 0xfffff000;
+unsigned long FIXADDR_TOP = (-PAGE_SIZE);
 EXPORT_SYMBOL(FIXADDR_TOP);
 
 void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
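
PGDIR_ORDER now names the log2 of the pgdir size in bytes rather than a
page order, which is why the 4k path passes PGDIR_ORDER - PAGE_SHIFT to
__get_free_pages() and free_pages(). A compile-time sketch (our own, with
the same PGD_T_LOG2 and PGDIR_SHIFT assumptions as earlier) showing that
the previously hard-coded orders fall out for 4k pages:

/* Illustration only, not part of the patch. */
#define PGDIR_ORDER_FOR(page_shift, pte_t_log2) \
	(32 + 2 - ((page_shift) + ((page_shift) - (pte_t_log2))))

_Static_assert(PGDIR_ORDER_FOR(12, 3) - 12 == 1, "8kB pgdir with 8-byte PTEs");
_Static_assert(PGDIR_ORDER_FOR(12, 2) - 12 == 0, "4kB pgdir with 4-byte PTEs");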
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index db61dafb924d..3d0c776f888d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -212,7 +212,7 @@ config PPC_MMU_NOHASH
 
 config PPC_MM_SLICES
 	bool
-	default y if HUGETLB_PAGE || PPC_64K_PAGES
+	default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES)
 	default n
 
 config VIRT_CPU_ACCOUNTING