author    David Gibson <david@gibson.dropbear.id.au>    2005-08-05 05:39:06 -0400
committer Paul Mackerras <paulus@samba.org>             2005-08-28 20:53:31 -0400
commit    e28f7faf05159f1cfd564596f5e6178edba6bd49
tree      45534d2c33bff8b64e3fd155fba55146cb7518e6
parent    decd300b30e499fe6be1bbfc5650fc971de8c1fa
[PATCH] Four level pagetables for ppc64
Implement 4-level pagetables for ppc64.

This patch implements full four-level page tables for ppc64, thereby
extending the usable user address range to 44 bits (16T).

The patch uses a full page for the tables at the bottom and top level,
and a quarter page for the intermediate levels.  It uses full 64-bit
pointers at every level, thus also increasing the addressable range of
physical memory.  This patch also tweaks the VSID allocation to allow
matching range for user addresses (this halves the number of available
contexts) and adds some #if and BUILD_BUG sanity checks.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--   arch/ppc64/mm/hash_utils.c      |   2
-rw-r--r--   arch/ppc64/mm/hugetlbpage.c     | 187
-rw-r--r--   arch/ppc64/mm/imalloc.c         |   2
-rw-r--r--   arch/ppc64/mm/init.c            |  62
-rw-r--r--   arch/ppc64/mm/slb_low.S         |   2
-rw-r--r--   arch/ppc64/mm/tlb.c             |  95
-rw-r--r--   include/asm-ppc64/imalloc.h     |   2
-rw-r--r--   include/asm-ppc64/mmu.h         |   7
-rw-r--r--   include/asm-ppc64/page.h        |  26
-rw-r--r--   include/asm-ppc64/pgalloc.h     |  93
-rw-r--r--   include/asm-ppc64/pgtable.h     |  90
-rw-r--r--   include/asm-ppc64/processor.h   |   4
12 files changed, 294 insertions, 278 deletions
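
The 44-bit/16T figure quoted above falls straight out of the new index sizes: 9 + 7 + 7 + 9 index bits plus the 12-bit page offset. The following standalone userspace sketch (not part of the patch) reproduces that arithmetic with the constants taken from the pgtable.h diff below; PAGE_SHIFT = 12 is assumed.

/* Standalone userspace sketch: reproduces the address-range arithmetic
 * introduced by this patch (index sizes copied from the diff below). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9

int main(void)
{
        unsigned long eaddr_size = PTE_INDEX_SIZE + PMD_INDEX_SIZE +
                PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT;
        unsigned long range = 1UL << eaddr_size;

        /* 9 + 7 + 7 + 9 + 12 = 44 bits => 16TB of user address space */
        assert(eaddr_size == 44);
        assert(range == 0x0000100000000000UL);  /* new TASK_SIZE_USER64 */

        /* Table sizes: a full page at the PTE and PGD levels (512 * 8 = 4096
         * bytes), a quarter page at the PMD and PUD levels (128 * 8 = 1024). */
        printf("PTE table %lu bytes, PMD/PUD tables %lu bytes, PGD table %lu bytes\n",
               (1UL << PTE_INDEX_SIZE) * 8, (1UL << PMD_INDEX_SIZE) * 8,
               (1UL << PGD_INDEX_SIZE) * 8);
        return 0;
}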
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 623b5d130c31..65d6e8527948 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -302,7 +302,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         int local = 0;
         cpumask_t tmp;
 
-        if ((ea & ~REGION_MASK) > EADDR_MASK)
+        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
                 return 1;
 
         switch (REGION_ID(ea)) {
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index f9524602818d..a13e44230a6f 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -27,124 +27,91 @@
 
 #include <linux/sysctl.h>
 
-#define HUGEPGDIR_SHIFT         (HPAGE_SHIFT + PAGE_SHIFT - 3)
-#define HUGEPGDIR_SIZE          (1UL << HUGEPGDIR_SHIFT)
-#define HUGEPGDIR_MASK          (~(HUGEPGDIR_SIZE-1))
-
-#define HUGEPTE_INDEX_SIZE      9
-#define HUGEPGD_INDEX_SIZE      10
-
-#define PTRS_PER_HUGEPTE        (1 << HUGEPTE_INDEX_SIZE)
-#define PTRS_PER_HUGEPGD        (1 << HUGEPGD_INDEX_SIZE)
-
-static inline int hugepgd_index(unsigned long addr)
-{
-        return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
-}
-
-static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
+/* Modelled after find_linux_pte() */
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
-        int index;
+        pgd_t *pg;
+        pud_t *pu;
+        pmd_t *pm;
+        pte_t *pt;
 
-        if (! mm->context.huge_pgdir)
-                return NULL;
+        BUG_ON(! in_hugepage_area(mm->context, addr));
 
+        addr &= HPAGE_MASK;
+
+        pg = pgd_offset(mm, addr);
+        if (!pgd_none(*pg)) {
+                pu = pud_offset(pg, addr);
+                if (!pud_none(*pu)) {
+                        pm = pmd_offset(pu, addr);
+                        pt = (pte_t *)pm;
+                        BUG_ON(!pmd_none(*pm)
+                               && !(pte_present(*pt) && pte_huge(*pt)));
+                        return pt;
+                }
+        }
 
-        index = hugepgd_index(addr);
-        BUG_ON(index >= PTRS_PER_HUGEPGD);
-        return (pud_t *)(mm->context.huge_pgdir + index);
+        return NULL;
 }
 
-static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
-        int index;
-
-        if (pud_none(*dir))
-                return NULL;
-
-        index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
-        return (pte_t *)pud_page(*dir) + index;
-}
+        pgd_t *pg;
+        pud_t *pu;
+        pmd_t *pm;
+        pte_t *pt;
 
-static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
-{
         BUG_ON(! in_hugepage_area(mm->context, addr));
 
-        if (! mm->context.huge_pgdir) {
-                pgd_t *new;
-                spin_unlock(&mm->page_table_lock);
-                /* Don't use pgd_alloc(), because we want __GFP_REPEAT */
-                new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
-                BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
-                spin_lock(&mm->page_table_lock);
-
-                /*
-                 * Because we dropped the lock, we should re-check the
-                 * entry, as somebody else could have populated it..
-                 */
-                if (mm->context.huge_pgdir)
-                        pgd_free(new);
-                else
-                        mm->context.huge_pgdir = new;
-        }
-        return hugepgd_offset(mm, addr);
-}
+        addr &= HPAGE_MASK;
 
-static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr)
-{
-        if (! pud_present(*dir)) {
-                pte_t *new;
+        pg = pgd_offset(mm, addr);
+        pu = pud_alloc(mm, pg, addr);
 
-                spin_unlock(&mm->page_table_lock);
-                new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
-                BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
-                spin_lock(&mm->page_table_lock);
-                /*
-                 * Because we dropped the lock, we should re-check the
-                 * entry, as somebody else could have populated it..
-                 */
-                if (pud_present(*dir)) {
-                        if (new)
-                                kmem_cache_free(zero_cache, new);
-                } else {
-                        struct page *ptepage;
-
-                        if (! new)
-                                return NULL;
-                        ptepage = virt_to_page(new);
-                        ptepage->mapping = (void *) mm;
-                        ptepage->index = addr & HUGEPGDIR_MASK;
-                        pud_populate(mm, dir, new);
+        if (pu) {
+                pm = pmd_alloc(mm, pu, addr);
+                if (pm) {
+                        pt = (pte_t *)pm;
+                        BUG_ON(!pmd_none(*pm)
+                               && !(pte_present(*pt) && pte_huge(*pt)));
+                        return pt;
                 }
         }
 
-        return hugepte_offset(dir, addr);
+        return NULL;
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
-        pud_t *pud;
+#define HUGEPTE_BATCH_SIZE      (HPAGE_SIZE / PMD_SIZE)
 
-        BUG_ON(! in_hugepage_area(mm->context, addr));
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t pte)
+{
+        int i;
 
-        pud = hugepgd_offset(mm, addr);
-        if (! pud)
-                return NULL;
+        if (pte_present(*ptep)) {
+                pte_clear(mm, addr, ptep);
+                flush_tlb_pending();
+        }
 
-        return hugepte_offset(pud, addr);
+        for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
+                *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+                ptep++;
+        }
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep)
 {
-        pud_t *pud;
+        unsigned long old = pte_update(ptep, ~0UL);
+        int i;
 
-        BUG_ON(! in_hugepage_area(mm->context, addr));
+        if (old & _PAGE_HASHPTE)
+                hpte_update(mm, addr, old, 0);
 
-        pud = hugepgd_alloc(mm, addr);
-        if (! pud)
-                return NULL;
+        for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
+                ptep[i] = __pte(0);
 
-        return hugepte_alloc(mm, pud, addr);
+        return __pte(old);
 }
 
 /*
@@ -541,42 +508,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
         }
 }
 
-void hugetlb_mm_free_pgd(struct mm_struct *mm)
-{
-        int i;
-        pgd_t *pgdir;
-
-        spin_lock(&mm->page_table_lock);
-
-        pgdir = mm->context.huge_pgdir;
-        if (! pgdir)
-                goto out;
-
-        mm->context.huge_pgdir = NULL;
-
-        /* cleanup any hugepte pages leftover */
-        for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
-                pud_t *pud = (pud_t *)(pgdir + i);
-
-                if (! pud_none(*pud)) {
-                        pte_t *pte = (pte_t *)pud_page(*pud);
-                        struct page *ptepage = virt_to_page(pte);
-
-                        ptepage->mapping = NULL;
-
-                        BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
-                        kmem_cache_free(zero_cache, pte);
-                }
-                pud_clear(pud);
-        }
-
-        BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
-        kmem_cache_free(zero_cache, pgdir);
-
- out:
-        spin_unlock(&mm->page_table_lock);
-}
-
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
                    unsigned long ea, unsigned long vsid, int local)
 {
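
With the hugepage PTE now living at the pmd level, one huge page spans several consecutive pmd slots, which is why set_huge_pte_at() and huge_ptep_get_and_clear() loop over HUGEPTE_BATCH_SIZE entries. A small userspace sketch of that arithmetic follows; HPAGE_SHIFT = 24 (the usual 16MB ppc64 huge page) is an assumption here and is not part of the quoted diff.

/* Userspace sketch of the "one huge page spans several pmd slots" batching
 * behind set_huge_pte_at(); constants are the ppc64 values implied by the
 * patch plus an assumed 16MB huge page size. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_INDEX_SIZE  9
#define HPAGE_SHIFT     24                      /* assumed: 16MB huge pages */

#define PMD_SHIFT       (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define HPAGE_SIZE      (1UL << HPAGE_SHIFT)
#define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE)

int main(void)
{
        unsigned long slots[HUGEPTE_BATCH_SIZE] = { 0 };
        unsigned long pte = 0xdeadb000UL;       /* pretend PTE value */
        unsigned int i;

        /* A 16MB huge page covers 8 consecutive 2MB pmd entries, so the same
         * pte value is replicated into each of them. */
        assert(HUGEPTE_BATCH_SIZE == 8);
        for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
                slots[i] = pte;

        printf("replicated pte into %lu pmd slots\n", HUGEPTE_BATCH_SIZE);
        return 0;
}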
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index b6e75b891ac0..c65b87b92756 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -31,7 +31,7 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
                         break;
                 if ((unsigned long)tmp->addr >= ioremap_bot)
                         addr = tmp->size + (unsigned long) tmp->addr;
-                if (addr > IMALLOC_END-size)
+                if (addr >= IMALLOC_END-size)
                         return 1;
         }
         *im_addr = addr;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index e58a24d42879..87f256df8de5 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -66,6 +66,14 @@
 #include <asm/vdso.h>
 #include <asm/imalloc.h>
 
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
 static unsigned long phbs_io_bot = PHBS_IO_BASE;
@@ -226,7 +234,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
          * Before that, we map using addresses going
          * up from ioremap_bot.  imalloc will use
          * the addresses from ioremap_bot through
-         * IMALLOC_END (0xE000001fffffffff)
+         * IMALLOC_END
          *
          */
         pa = addr & PAGE_MASK;
@@ -417,12 +425,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         int index;
         int err;
 
-#ifdef CONFIG_HUGETLB_PAGE
-        /* We leave htlb_segs as it was, but for a fork, we need to
-         * clear the huge_pgdir. */
-        mm->context.huge_pgdir = NULL;
-#endif
-
 again:
         if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
                 return -ENOMEM;
@@ -453,8 +455,6 @@ void destroy_context(struct mm_struct *mm)
         spin_unlock(&mmu_context_lock);
 
         mm->context.id = NO_CONTEXT;
-
-        hugetlb_mm_free_pgd(mm);
 }
 
 /*
@@ -833,23 +833,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
         return virt_addr;
 }
 
-kmem_cache_t *zero_cache;
-
-static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
 {
-        memset(pte, 0, PAGE_SIZE);
+        memset(addr, 0, kmem_cache_size(cache));
 }
 
+static const int pgtable_cache_size[2] = {
+        PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+        "pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
 void pgtable_cache_init(void)
 {
-        zero_cache = kmem_cache_create("zero",
-                                       PAGE_SIZE,
-                                       0,
-                                       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
-                                       zero_ctor,
-                                       NULL);
-        if (!zero_cache)
-                panic("pgtable_cache_init(): could not create zero_cache!\n");
+        int i;
+
+        BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+        BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+        BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+        BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+        for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+                int size = pgtable_cache_size[i];
+                const char *name = pgtable_cache_name[i];
+
+                pgtable_cache[i] = kmem_cache_create(name,
+                                                     size, size,
+                                                     SLAB_HWCACHE_ALIGN
+                                                     | SLAB_MUST_HWCACHE_ALIGN,
+                                                     zero_ctor,
+                                                     NULL);
+                if (! pgtable_cache[i])
+                        panic("pgtable_cache_init(): could not create %s!\n",
+                              name);
+        }
 }
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
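
pgtable_cache_init() replaces the single zero_cache with an array of slab caches keyed by table size: PTE and PGD tables (a full page) share cache 0, PMD and PUD tables (a quarter page) share cache 1. A userspace model of that numbering follows, with the sizes written out explicitly instead of via sizeof; it is an illustration, not kernel code.

/* Sketch of the cache-numbering scheme pgtable_cache_init() sets up: four
 * table types map onto two caches, selected by the *_CACHE_NUM constants. */
#include <assert.h>
#include <stdio.h>

#define PTE_TABLE_SIZE  (8 << 9)        /* 512 64-bit entries = 4096 bytes */
#define PMD_TABLE_SIZE  (8 << 7)        /* 128 64-bit entries = 1024 bytes */
#define PUD_TABLE_SIZE  (8 << 7)
#define PGD_TABLE_SIZE  (8 << 9)

#define PTE_CACHE_NUM   0
#define PMD_CACHE_NUM   1
#define PUD_CACHE_NUM   1
#define PGD_CACHE_NUM   0

static const int pgtable_cache_size[2] = { PTE_TABLE_SIZE, PMD_TABLE_SIZE };
static const char *pgtable_cache_name[2] = { "pgd_pte_cache", "pud_pmd_cache" };

int main(void)
{
        /* The BUILD_BUG_ONs in the patch encode exactly these equalities. */
        assert(PTE_TABLE_SIZE == pgtable_cache_size[PTE_CACHE_NUM]);
        assert(PMD_TABLE_SIZE == pgtable_cache_size[PMD_CACHE_NUM]);
        assert(PUD_TABLE_SIZE == pgtable_cache_size[PUD_CACHE_NUM]);
        assert(PGD_TABLE_SIZE == pgtable_cache_size[PGD_CACHE_NUM]);

        printf("cache 0 (%s): %d bytes, cache 1 (%s): %d bytes\n",
               pgtable_cache_name[0], pgtable_cache_size[0],
               pgtable_cache_name[1], pgtable_cache_size[1]);
        return 0;
}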
diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
index 8379d678f70f..f20fc52483a7 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/ppc64/mm/slb_low.S
@@ -91,7 +91,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 0:      /* user address: proto-VSID = context<<15 | ESID */
         li      r11,SLB_VSID_USER
 
-        srdi.   r9,r3,13
+        srdi.   r9,r3,USER_ESID_BITS
         bne-    8f                      /* invalid ea bits set */
 
 #ifdef CONFIG_HUGETLB_PAGE
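
The slb_low.S change makes the "invalid ea bits" test match the new 44-bit user range: once the effective address has been reduced to an ESID, any bits at or above USER_ESID_BITS are out of range. Below is a hedged C rendering of the same check; SID_SHIFT = 28 and the assumption that r3 already holds the ESID at this point are not part of the quoted diff.

/* C rendering of the bounds check behind "srdi. r9,r3,USER_ESID_BITS". */
#include <assert.h>

#define SID_SHIFT       28      /* 256MB segments; assumed, not in the diff */
#define USER_ESID_BITS  16

static int user_ea_valid(unsigned long ea)
{
        unsigned long esid = ea >> SID_SHIFT;

        return (esid >> USER_ESID_BITS) == 0;   /* zero => "bne- 8f" not taken */
}

int main(void)
{
        assert(user_ea_valid(0x00000fffffffffffUL));    /* last byte below 16TB */
        assert(!user_ea_valid(0x0000100000000000UL));   /* first byte at 16TB */
        return 0;
}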
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index 26f0172c4527..d8a6593a13f0 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -41,7 +41,58 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 unsigned long pte_freelist_forced_free;
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+struct pte_freelist_batch
+{
+        struct rcu_head rcu;
+        unsigned int    index;
+        pgtable_free_t  tables[0];
+};
+
+DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+unsigned long pte_freelist_forced_free;
+
+#define PTE_FREELIST_SIZE \
+        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+          / sizeof(pgtable_free_t))
+
+#ifdef CONFIG_SMP
+static void pte_free_smp_sync(void *arg)
+{
+        /* Do nothing, just ensure we sync with all CPUs */
+}
+#endif
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+        pte_freelist_forced_free++;
+
+        smp_call_function(pte_free_smp_sync, NULL, 0, 1);
+
+        pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+        struct pte_freelist_batch *batch =
+                container_of(head, struct pte_freelist_batch, rcu);
+        unsigned int i;
+
+        for (i = 0; i < batch->index; i++)
+                pgtable_free(batch->tables[i]);
+
+        free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+        INIT_RCU_HEAD(&batch->rcu);
+        call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
         /* This is safe as we are holding page_table_lock */
         cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
@@ -49,19 +100,19 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
 
         if (atomic_read(&tlb->mm->mm_users) < 2 ||
             cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-                pte_free(ptepage);
+                pgtable_free(pgf);
                 return;
         }
 
         if (*batchp == NULL) {
                 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                 if (*batchp == NULL) {
-                        pte_free_now(ptepage);
+                        pgtable_free_now(pgf);
                         return;
                 }
                 (*batchp)->index = 0;
         }
-        (*batchp)->pages[(*batchp)->index++] = ptepage;
+        (*batchp)->tables[(*batchp)->index++] = pgf;
         if ((*batchp)->index == PTE_FREELIST_SIZE) {
                 pte_free_submit(*batchp);
                 *batchp = NULL;
@@ -132,42 +183,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
         put_cpu();
 }
 
-#ifdef CONFIG_SMP
-static void pte_free_smp_sync(void *arg)
-{
-        /* Do nothing, just ensure we sync with all CPUs */
-}
-#endif
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-void pte_free_now(struct page *ptepage)
-{
-        pte_freelist_forced_free++;
-
-        smp_call_function(pte_free_smp_sync, NULL, 0, 1);
-
-        pte_free(ptepage);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-        struct pte_freelist_batch *batch =
-                container_of(head, struct pte_freelist_batch, rcu);
-        unsigned int i;
-
-        for (i = 0; i < batch->index; i++)
-                pte_free(batch->pages[i]);
-        free_page((unsigned long)batch);
-}
-
-void pte_free_submit(struct pte_freelist_batch *batch)
-{
-        INIT_RCU_HEAD(&batch->rcu);
-        call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
 void pte_free_finish(void)
 {
         /* This is safe as we are holding page_table_lock */
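
pgtable_free_tlb() keeps the old batching scheme but stores pgtable_free_t cookies instead of struct page pointers: frees are collected into a page-sized batch and handed to call_rcu(), with pgtable_free_now() as the emergency fallback. The sketch below models just the batching in plain userspace C; the RCU grace period is stubbed out with a direct call, and malloc/free stand in for the slab caches.

/* Userspace sketch of the deferred-free batching used by pgtable_free_tlb(). */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

typedef struct { unsigned long val; } pgtable_free_t;

struct pte_freelist_batch {
        unsigned int index;
        pgtable_free_t tables[];
};

#define PTE_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / sizeof(pgtable_free_t))

static void pte_free_rcu_callback(struct pte_freelist_batch *batch)
{
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                free((void *)batch->tables[i].val);     /* stands in for pgtable_free() */
        free(batch);
}

static struct pte_freelist_batch *cur;

static void pgtable_free_tlb(pgtable_free_t pgf)
{
        if (!cur) {
                cur = malloc(PAGE_SIZE);
                cur->index = 0;
        }
        cur->tables[cur->index++] = pgf;
        if (cur->index == PTE_FREELIST_SIZE) {  /* batch full: submit it */
                pte_free_rcu_callback(cur);     /* kernel: call_rcu() */
                cur = NULL;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 1000; i++)
                pgtable_free_tlb((pgtable_free_t){ .val = (unsigned long)malloc(64) });
        if (cur) {                              /* kernel: pte_free_finish() */
                pte_free_rcu_callback(cur);
                cur = NULL;
        }
        printf("batch capacity: %lu entries\n", (unsigned long)PTE_FREELIST_SIZE);
        return 0;
}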
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
index e46ff68a6e41..42adf7033a81 100644
--- a/include/asm-ppc64/imalloc.h
+++ b/include/asm-ppc64/imalloc.h
@@ -6,7 +6,7 @@
  */
 #define PHBS_IO_BASE    VMALLOC_END
 #define IMALLOC_BASE    (PHBS_IO_BASE + 0x80000000ul)   /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END     (VMALLOC_START + EADDR_MASK)
+#define IMALLOC_END     (VMALLOC_START + PGTABLE_RANGE)
 
 
 /* imalloc region types */
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 70348a851313..959a4bfdcd6a 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -259,8 +259,10 @@ extern void stabs_alloc(void);
 #define VSID_BITS       36
 #define VSID_MODULUS    ((1UL<<VSID_BITS)-1)
 
-#define CONTEXT_BITS    20
-#define USER_ESID_BITS  15
+#define CONTEXT_BITS    19
+#define USER_ESID_BITS  16
+
+#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -302,7 +304,6 @@ typedef unsigned long mm_context_id_t;
 typedef struct {
         mm_context_id_t id;
 #ifdef CONFIG_HUGETLB_PAGE
-        pgd_t *huge_pgdir;
         u16 htlb_segs; /* bitmask */
 #endif
 } mm_context_t;
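
The mmu.h change is the VSID tradeoff the commit message mentions: the proto-VSID is context << USER_ESID_BITS | esid, so giving each context a 16TB range (16 ESID bits) leaves room for only 2^19 contexts. A quick, hedged check of that arithmetic follows; VSID_BITS = 36 comes from this hunk's context lines, while SID_SHIFT = 28 is assumed and not shown here.

/* Arithmetic check of the CONTEXT_BITS / USER_ESID_BITS split. */
#include <assert.h>
#include <stdio.h>

#define SID_SHIFT       28      /* assumed 256MB segments */
#define VSID_BITS       36
#define CONTEXT_BITS    19
#define USER_ESID_BITS  16

int main(void)
{
        unsigned long max_context = (1UL << CONTEXT_BITS) - 1;
        unsigned long max_esid    = (1UL << USER_ESID_BITS) - 1;
        unsigned long proto_vsid  = (max_context << USER_ESID_BITS) | max_esid;

        /* The proto-VSID must still fit into the 36-bit scramble input. */
        assert(CONTEXT_BITS + USER_ESID_BITS <= VSID_BITS);
        assert(proto_vsid < (1UL << VSID_BITS));

        printf("contexts: %lu, per-context user range: %lu TB\n",
               max_context + 1, ((max_esid + 1) << SID_SHIFT) >> 40);
        return 0;
}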
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a5893a305a09..7e7b18ea986e 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -46,6 +46,7 @@
 
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
 
 #define touches_hugepage_low_range(mm, addr, len) \
         (LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
@@ -125,36 +126,42 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
  * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)      ((x).pte)
 #define pmd_val(x)      ((x).pmd)
+#define pud_val(x)      ((x).pud)
 #define pgd_val(x)      ((x).pgd)
 #define pgprot_val(x)   ((x).pgprot)
 
-#define __pte(x)        ((pte_t) { (x) } )
-#define __pmd(x)        ((pmd_t) { (x) } )
-#define __pgd(x)        ((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
+#define __pte(x)        ((pte_t) { (x) })
+#define __pmd(x)        ((pmd_t) { (x) })
+#define __pud(x)        ((pud_t) { (x) })
+#define __pgd(x)        ((pgd_t) { (x) })
+#define __pgprot(x)     ((pgprot_t) { (x) })
 
 #else
 /*
  * .. while these make it easier on the compiler
  */
 typedef unsigned long pte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
+typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
 
 #define pte_val(x)      (x)
 #define pmd_val(x)      (x)
+#define pud_val(x)      (x)
 #define pgd_val(x)      (x)
 #define pgprot_val(x)   (x)
 
 #define __pte(x)        (x)
 #define __pmd(x)        (x)
+#define __pud(x)        (x)
 #define __pgd(x)        (x)
 #define __pgprot(x)     (x)
 
@@ -208,9 +215,6 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
 #define USER_REGION_ID          (0UL)
 #define REGION_ID(ea)           (((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
-#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
-
 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
 
 #ifdef CONFIG_DISCONTIGMEM
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 4fc4b739b380..26bc49c1108d 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -6,7 +6,12 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *zero_cache;
+extern kmem_cache_t *pgtable_cache[];
+
+#define PTE_CACHE_NUM   0
+#define PMD_CACHE_NUM   1
+#define PUD_CACHE_NUM   1
+#define PGD_CACHE_NUM   0
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -15,30 +20,40 @@ extern kmem_cache_t *zero_cache;
  * 2 of the License, or (at your option) any later version.
  */
 
-static inline pgd_t *
-pgd_alloc(struct mm_struct *mm)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        return kmem_cache_alloc(zero_cache, GFP_KERNEL);
+        return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
 }
 
-static inline void
-pgd_free(pgd_t *pgd)
+static inline void pgd_free(pgd_t *pgd)
 {
-        kmem_cache_free(zero_cache, pgd);
+        kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#define pgd_populate(MM, PGD, PUD)      pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+        return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+                                GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(pud_t *pud)
+{
+        kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
 }
 
 #define pud_populate(MM, PUD, PMD)      pud_set(PUD, PMD)
 
-static inline pmd_t *
-pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+                                GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void
-pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-        kmem_cache_free(zero_cache, pmd);
+        kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
 }
 
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
@@ -47,44 +62,58 @@ pmd_free(pmd_t *pmd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
+                                GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
-                return virt_to_page(pte);
-        return NULL;
+        return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-        kmem_cache_free(zero_cache, pte);
+        kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-        kmem_cache_free(zero_cache, page_address(ptepage));
+        pte_free_kernel(page_address(ptepage));
 }
 
-struct pte_freelist_batch
+#define PGF_CACHENUM_MASK       0xf
+
+typedef struct pgtable_free {
+        unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+                                                unsigned long mask)
 {
-        struct rcu_head rcu;
-        unsigned int index;
-        struct page * pages[0];
-};
+        BUG_ON(cachenum > PGF_CACHENUM_MASK);
 
-#define PTE_FREELIST_SIZE ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
-                           sizeof(struct page *))
+        return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
 
-extern void pte_free_now(struct page *ptepage);
-extern void pte_free_submit(struct pte_freelist_batch *batch);
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+        int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+        kmem_cache_free(pgtable_cache[cachenum], p);
+}
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
-#define __pmd_free_tlb(tlb, pmd)        __pte_free_tlb(tlb, virt_to_page(pmd))
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb, ptepage)    \
+        pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+                PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd)        \
+        pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+                PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pmd)        \
+        pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+                PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 
 #define check_pgt_cache()       do { } while (0)
 
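
pgtable_free_t packs the cache number into the low bits of the table's address, which works because every table comes from a cache whose object size (and hence alignment) is a power of two well above 16 bytes. Below is a standalone sketch of the encode/decode round trip, with aligned_alloc() standing in for the slab cache; it mirrors the helpers in the diff but is not kernel code.

/* Userspace sketch of the pgtable_free_t pointer/cache-number encoding. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PGF_CACHENUM_MASK 0xfUL

typedef struct pgtable_free { unsigned long val; } pgtable_free_t;

static pgtable_free_t pgtable_free_cache(void *p, int cachenum, unsigned long mask)
{
        assert(cachenum <= (int)PGF_CACHENUM_MASK);     /* BUG_ON() in the patch */
        return (pgtable_free_t){ .val = ((unsigned long)p & ~mask) | cachenum };
}

static void pgtable_free(pgtable_free_t pgf)
{
        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
        int cachenum = pgf.val & PGF_CACHENUM_MASK;

        printf("freeing %p back to cache %d\n", p, cachenum);
        free(p);        /* kernel: kmem_cache_free(pgtable_cache[cachenum], p) */
}

int main(void)
{
        /* A 1024-byte "PMD table" aligned to its size, as the slab cache set up
         * with kmem_cache_create(name, size, size, ...) guarantees in the kernel. */
        void *pmd = aligned_alloc(1024, 1024);
        pgtable_free_t pgf = pgtable_free_cache(pmd, 1 /* PMD_CACHE_NUM */, 1024 - 1);

        assert((void *)(pgf.val & ~PGF_CACHENUM_MASK) == pmd);
        pgtable_free(pgf);
        return 0;
}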
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 46cf61c2ff69..5ea952ad7164 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -15,19 +15,24 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
  * each entry by assuming that each entry is page aligned.
  */
 #define PTE_INDEX_SIZE  9
-#define PMD_INDEX_SIZE  10
-#define PGD_INDEX_SIZE  10
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  7
+#define PGD_INDEX_SIZE  9
+
+#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE  (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD    (1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
 
 /* PMD_SHIFT determines what a second-level page table entry can map */
@@ -35,8 +40,13 @@
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT     (PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT       (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE        (1UL << PUD_SHIFT)
+#define PUD_MASK        (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT     (PUD_SHIFT + PUD_INDEX_SIZE)
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
 
@@ -45,15 +55,23 @@
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                    PGD_INDEX_SIZE + PAGE_SHIFT)
-#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                            PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE  (0x10000000000UL)
+#define VMALLOC_SIZE  (0x80000000000UL)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
@@ -154,8 +172,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
                    unsigned long ea, unsigned long vsid, int local);
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -163,7 +179,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)      -1
-#define hugetlb_mm_free_pgd(mm)                 do {} while (0)
 
 #endif
 
@@ -197,39 +212,45 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 #define pte_pfn(x)              ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
 #define pte_page(x)             pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep)     \
-        (pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
+#define pmd_set(pmdp, ptep)     ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
 #define pmd_none(pmd)           (!pmd_val(pmd))
 #define pmd_bad(pmd)            (pmd_val(pmd) == 0)
 #define pmd_present(pmd)        (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)    (__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd)    (pmd_val(pmd))
 #define pmd_page(pmd)           virt_to_page(pmd_page_kernel(pmd))
 
-#define pud_set(pudp, pmdp)     (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_set(pudp, pmdp)     (pud_val(*(pudp)) = (unsigned long)(pmdp))
 #define pud_none(pud)           (!pud_val(pud))
-#define pud_bad(pud)            ((pud_val(pud)) == 0UL)
-#define pud_present(pud)        (pud_val(pud) != 0UL)
-#define pud_clear(pudp)         (pud_val(*(pudp)) = 0UL)
-#define pud_page(pud)           (__bpn_to_ba(pud_val(pud)))
+#define pud_bad(pud)            ((pud_val(pud)) == 0)
+#define pud_present(pud)        (pud_val(pud) != 0)
+#define pud_clear(pudp)         (pud_val(*(pudp)) = 0)
+#define pud_page(pud)           (pud_val(pud))
+
+#define pgd_set(pgdp, pudp)     ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_none(pgd)           (!pgd_val(pgd))
+#define pgd_bad(pgd)            (pgd_val(pgd) == 0)
+#define pgd_present(pgd)        (pgd_val(pgd) != 0)
+#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0)
+#define pgd_page(pgd)           (pgd_val(pgd))
 
 /*
  * Find an entry in a page-table-directory.  We combine the address region
  * (the high order N bits) and the pgd portion of the address.
  */
 /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
 
 #define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))
 
-/* Find an entry in the second-level page table.. */
+#define pud_offset(pgdp, addr)  \
+  (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
 #define pmd_offset(pudp,addr) \
-  ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+  (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
-/* Find an entry in the third-level page table.. */
 #define pte_offset_kernel(dir,addr) \
-  ((pte_t *) pmd_page_kernel(*(dir)) \
-    + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+  (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)                pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr)         pte_offset_kernel((dir), (addr))
@@ -458,23 +479,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 #define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
 #define pmd_ERROR(e) \
-        printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
+        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pud_val(e))
 #define pgd_ERROR(e) \
-        printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
+        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern pgd_t swapper_pg_dir[];
 
 extern void paging_init(void);
 
-/*
- * Because the huge pgtables are only 2 level, they can take
- * at most around 4M, much less than one hugepage which the
- * process is presumably entitled to use.  So we don't bother
- * freeing up the pagetables on unmap, and wait until
- * destroy_context() to clean up the lot.
- */
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-        do { } while (0)
+        free_pgd_range(tlb, addr, end, floor, ceiling)
 
 /*
  * This gets called at the end of handling a page fault, when
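
Taken together, the new constants slice a user address into 9 + 7 + 7 + 9 index bits over a 12-bit page offset; the 0x1ff mask in pgd_index() is simply PTRS_PER_PGD - 1 under this layout. A standalone sketch of the decomposition follows (PAGE_SHIFT = 12 assumed); the real lookup is the pgd_offset() -> pud_offset() -> pmd_offset() -> pte_offset_kernel() chain above.

/* Userspace sketch: split a user address into its four table indices. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9

#define PMD_SHIFT       (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PUD_SHIFT       (PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SHIFT     (PUD_SHIFT + PUD_INDEX_SIZE)

int main(void)
{
        unsigned long addr = 0x0000020304055000UL;      /* arbitrary user address */

        printf("pgd %lu, pud %lu, pmd %lu, pte %lu, offset 0x%lx\n",
               (addr >> PGDIR_SHIFT) & ((1UL << PGD_INDEX_SIZE) - 1),
               (addr >> PUD_SHIFT)   & ((1UL << PUD_INDEX_SIZE) - 1),
               (addr >> PMD_SHIFT)   & ((1UL << PMD_INDEX_SIZE) - 1),
               (addr >> PAGE_SHIFT)  & ((1UL << PTE_INDEX_SIZE) - 1),
               addr & ((1UL << PAGE_SHIFT) - 1));
        return 0;
}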
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 352306cfb579..50b14c0ddb87 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -382,8 +382,8 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 extern struct task_struct *last_task_used_math;
 extern struct task_struct *last_task_used_altivec;
 
-/* 64-bit user address space is 41-bits (2TBs user VM) */
-#define TASK_SIZE_USER64 (0x0000020000000000UL)
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
 
 /*
  * 32-bit user address space is 4GB - 1 page