author     Linus Torvalds <torvalds@linux-foundation.org>  2008-10-15 11:07:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-15 11:07:35 -0400
commit     5f2434a66dfa4701b81b79a78eaf9c32da0f8839 (patch)
tree       8c38f1fb0d0fbcd15e496df89be00ad8c4918a43 /arch/powerpc/mm
parent     278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
parent     6dc6472581f693b5fc95aebedf67b4960fb85cf0 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (158 commits)
  powerpc: Fix CHRP PCI config access for indirect_pci
  powerpc/chrp: Fix detection of Python PCI host bridge on IBM CHRPs
  powerpc: Fix 32-bit SMP boot on CHRP
  powerpc: Fix link errors on 32-bit machines using legacy DMA
  powerpc/pci: Improve detection of unassigned bridge resources
  hvc_console: Fix free_irq in spinlocked section
  powerpc: Get USE_STRICT_MM_TYPECHECKS working again
  powerpc: Reflect the used arguments in machine_init() prototype
  powerpc: Fix DMA offset for non-coherent DMA
  powerpc: fix fsl_upm nand driver modular build
  powerpc/83xx: add NAND support for the MPC8360E-RDK boards
  powerpc: FPGA support for GE Fanuc SBC610
  i2c: MPC8349E-mITX Power Management and GPIO expander driver
  powerpc: reserve two DMA channels for audio in MPC8610 HPCD device tree
  powerpc: document the "fsl,ssi-dma-channel" compatible property
  powerpc: disable CHRP and PMAC support in various defconfigs
  OF: add fsl,mcu-mpc8349emitx to the exception list
  powerpc/83xx: add DS1374 RTC support for the MPC837xE-MDS boards
  powerpc: remove support for bootmem-allocated memory for the DIU driver
  powerpc: remove non-dependent load fsl_booke PTE_64BIT
  ...
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c  |    5
-rw-r--r--  arch/powerpc/mm/gup.c            |    7
-rw-r--r--  arch/powerpc/mm/hash_low_32.S    |  122
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  |    8
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c    |   59
-rw-r--r--  arch/powerpc/mm/init_64.c        |    4
-rw-r--r--  arch/powerpc/mm/mem.c            |    5
-rw-r--r--  arch/powerpc/mm/numa.c           |  185
-rw-r--r--  arch/powerpc/mm/pgtable_32.c     |    4
-rw-r--r--  arch/powerpc/mm/tlb_32.c         |    1
10 files changed, 281 insertions, 119 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ce10e2b1b902..23cee39534fd 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -202,7 +202,7 @@ adjust_total_lowmem(void)
 		cam_max_size = max_lowmem_size;
 
 	/* adjust lowmem size to max_lowmem_size */
-	ram = min(max_lowmem_size, (phys_addr_t)total_lowmem);
+	ram = min(max_lowmem_size, total_lowmem);
 
 	/* Calculate CAM values */
 	__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
@@ -225,7 +225,8 @@ adjust_total_lowmem(void)
 	printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
 			" CAM2=%ldMb residual: %ldMb\n",
 			__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
-			(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
+			(long int)((total_lowmem - __cam0 - __cam1 - __cam2)
+			 >> 20));
 	__max_low_memory = __cam0 + __cam1 + __cam2;
 	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
 }
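
The printk change goes with the wider-physical-address cleanup of total_lowmem: once the value can be wider than unsigned long, the residual has to be narrowed explicitly for the %ld conversion. A small user-space sketch of the same pitfall (the typedef and sizes below are assumptions, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone sketch (not kernel code): with 36-bit physical addressing,
 * total_lowmem is a phys_addr_t that can be wider than long, so the
 * residual must be cast down before being printed with "%ld".
 */
typedef uint64_t phys_addr_t;

int main(void)
{
	phys_addr_t total_lowmem = 768ULL << 20;	/* 768 MB of lowmem */
	long cam0 = 256L << 20, cam1 = 256L << 20, cam2 = 64L << 20;

	printf("CAM0=%ldMb, CAM1=%ldMb, CAM2=%ldMb residual: %ldMb\n",
	       cam0 >> 20, cam1 >> 20, cam2 >> 20,
	       (long int)((total_lowmem - cam0 - cam1 - cam2) >> 20));
	return 0;
}
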
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 9fdf4d6335e4..28a114db3ba0 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -41,7 +41,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		page = pte_page(pte);
 		if (!page_cache_get_speculative(page))
 			return 0;
-		if (unlikely(pte != *ptep)) {
+		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 			put_page(page);
 			return 0;
 		}
@@ -92,7 +92,7 @@ static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
 			*nr -= refs;
 			return 0;
 		}
-	if (unlikely(pte != *ptep)) {
+	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 		/* Could be optimized better */
 		while (*nr) {
 			put_page(page);
@@ -237,7 +237,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		pgd_t pgd = *pgdp;
 
 		VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
-		pr_debug(" %016lx: normal pgd %p\n", addr, (void *)pgd);
+		pr_debug(" %016lx: normal pgd %p\n", addr,
+			 (void *)pgd_val(pgd));
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
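
These hunks belong to the USE_STRICT_MM_TYPECHECKS work: with strict typechecks enabled, pte_t and pgd_t are single-member structs, so they can no longer be compared or printed directly and the raw value has to be extracted with pte_val()/pgd_val(). A stripped-down sketch of the wrapper idea (the real definitions live in the powerpc page-table headers and vary by configuration):

#include <stdio.h>

/* Sketch of the strict type-checking wrappers; not the kernel's exact layout. */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

int main(void)
{
	pte_t a = __pte(0x1000 | 0x1);
	pte_t b = __pte(0x1000 | 0x1);

	/* "a != b" would not compile: structs have no operator.  Compare values. */
	if (pte_val(a) == pte_val(b))
		printf("ptes match: %#lx\n", pte_val(a));
	return 0;
}
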
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index b9ba7d930801..7bffb70b9fe2 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -75,7 +75,7 @@ _GLOBAL(hash_page_sync)
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address.  Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, ctr, lr.
+ * Uses r0, r3 - r8, r10, ctr, lr.
  */
 	.text
 _GLOBAL(hash_page)
@@ -106,9 +106,15 @@ _GLOBAL(hash_page)
 	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:	add	r5,r5,r7		/* convert to phys addr */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
 	lwz	r8,0(r5)		/* get pmd entry */
 	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#else
+	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
+	lwzx	r8,r8,r5		/* Get L1 entry */
+	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -118,7 +124,11 @@ _GLOBAL(hash_page)
 	   to the address following the rfi. */
 	beqlr-
 #endif
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+#else
+	rlwimi	r8,r4,23,20,28		/* compute pte address */
+#endif
 	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
@@ -127,9 +137,15 @@ _GLOBAL(hash_page)
 	 * because almost always, there won't be a permission violation
 	 * and there won't already be an HPTE, and thus we will have
 	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
+	 *
+	 * If PTE_64BIT is set, the low word is the flags word; use that
+	 * word for locking since it contains all the interesting bits.
 	 */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 retry:
-	lwarx	r6,0,r8			/* get linux-style pte */
+	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 	andc.	r5,r3,r6		/* check access & ~permission */
 #ifdef CONFIG_SMP
 	bne-	hash_page_out		/* return if access not permitted */
@@ -137,6 +153,15 @@ retry:
 	bnelr-
 #endif
 	or	r5,r0,r6		/* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	stwcx.	r5,0,r8			/* attempt to update PTE */
 	bne-	retry			/* retry if someone got there first */
 
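
With CONFIG_PTE_64BIT the Linux PTE is two 32-bit words: only the flags word is updated with the lwarx/stwcx. sequence, and the upper word is loaded through an address that is made to depend on the lwarx result so the two reads stay ordered without an extra barrier. A rough user-space analogue of the retry loop (GCC builtins stand in for lwarx/stwcx., the bit values are illustrative, and the false-data-dependency trick itself has no direct C equivalent):

#include <stdio.h>
#include <stdint.h>

/*
 * Rough model of the update above: the 32-bit flags word is updated with a
 * compare-and-swap retry loop while the upper word is only read.
 */
struct pte64 {
	uint32_t flags;		/* _PAGE_* bits, updated atomically */
	uint32_t rpn_hi;	/* upper physical-page bits, read only here */
};

#define _PAGE_HASHPTE	0x002	/* illustrative bit values */
#define _PAGE_ACCESSED	0x020

static uint32_t pte_update(struct pte64 *p, uint32_t set, uint32_t *upper)
{
	uint32_t oldval, newval;

	do {
		oldval = __atomic_load_n(&p->flags, __ATOMIC_RELAXED);
		*upper = p->rpn_hi;	/* the asm orders this read after the lwarx */
		newval = oldval | set;
	} while (!__atomic_compare_exchange_n(&p->flags, &oldval, newval, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	return oldval;
}

int main(void)
{
	struct pte64 pte = { .flags = 0x1, .rpn_hi = 0x5 };
	uint32_t upper;

	pte_update(&pte, _PAGE_ACCESSED | _PAGE_HASHPTE, &upper);
	printf("flags=%#x upper=%#x\n", pte.flags, upper);
	return 0;
}
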
@@ -203,9 +228,9 @@ _GLOBAL(add_hash_page)
 	 * we can't take a hash table miss (assuming the code is
 	 * covered by a BAT).  -- paulus
 	 */
-	mfmsr	r10
+	mfmsr	r9
 	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
 	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
 	mtmsr	r0
 	SYNC_601
@@ -214,14 +239,14 @@ _GLOBAL(add_hash_page)
 	tophys(r7,0)
 
 #ifdef CONFIG_SMP
-	addis	r9,r7,mmu_hash_lock@ha
-	addi	r9,r9,mmu_hash_lock@l
-10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
+10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 	cmpi	0,r0,0
 	bne-	11f
-	stwcx.	r8,0,r9
+	stwcx.	r8,0,r6
 	beq+	12f
-11:	lwz	r0,0(r9)
+11:	lwz	r0,0(r6)
 	cmpi	0,r0,0
 	beq	10b
 	b	11b
@@ -234,10 +259,24 @@ _GLOBAL(add_hash_page)
 	 * HPTE, so we just unlock and return.
 	 */
 	mr	r8,r5
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29
+#else
+	rlwimi	r8,r4,23,20,28
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
1:	lwarx	r6,0,r8
 	andi.	r0,r6,_PAGE_HASHPTE
 	bne	9f			/* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	ori	r5,r6,_PAGE_HASHPTE
 	stwcx.	r5,0,r8
 	bne-	1b
@@ -246,13 +285,15 @@ _GLOBAL(add_hash_page)
 
 9:
 #ifdef CONFIG_SMP
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
 	eieio
 	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	stw	r0,0(r6)		/* clear mmu_hash_lock */
 #endif
 
 	/* reenable interrupts and DR */
-	mtmsr	r10
+	mtmsr	r9
 	SYNC_601
 	isync
 
@@ -267,7 +308,8 @@ _GLOBAL(add_hash_page)
  * r5 contains the linux PTE, r6 contains the old value of the
  * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
  * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).
+ * -KERNELBASE if it is off).  r10 contains the upper half of
+ * the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
@@ -285,7 +327,7 @@ Hash_bits = 12	/* e.g. 256kB hash table */
 Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
 /* defines for the PTE format for 32-bit PPCs */
-#define PTE_SIZE	8
+#define HPTE_SIZE	8
 #define PTEG_SIZE	64
 #define LG_PTEG_SIZE	6
 #define LDPTEu		lwzu
@@ -313,6 +355,11 @@ _GLOBAL(create_hpte)
 BEGIN_FTR_SECTION
 	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+	/* Put the XPN bits into the PTE */
+	rlwimi	r8,r10,8,20,22
+	rlwimi	r8,r10,2,29,29
+#endif
 
 	/* Construct the high word of the PPC-style PTE (r5) */
 	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
@@ -342,8 +389,8 @@ _GLOBAL(hash_page_patch_A)
 
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 	mtctr	r0
-	addi	r4,r3,-PTE_SIZE
-1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
+	addi	r4,r3,-HPTE_SIZE
+1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
 	CMPPTE	0,r6,r5
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	found_slot
@@ -353,9 +400,9 @@ _GLOBAL(hash_page_patch_B)
 _GLOBAL(hash_page_patch_B)
 	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
 	xori	r4,r4,(-PTEG_SIZE & 0xffff)
-	addi	r4,r4,-PTE_SIZE
+	addi	r4,r4,-HPTE_SIZE
 	mtctr	r0
-2:	LDPTEu	r6,PTE_SIZE(r4)
+2:	LDPTEu	r6,HPTE_SIZE(r4)
 	CMPPTE	0,r6,r5
 	bdnzf	2,2b
 	beq+	found_slot
@@ -363,8 +410,8 @@ _GLOBAL(hash_page_patch_B)
 
 	/* Search the primary PTEG for an empty slot */
 10:	mtctr	r0
-	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
-1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
+	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
+1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
 	TST_V(r6)			/* test valid bit */
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	found_empty
@@ -380,9 +427,9 @@ _GLOBAL(hash_page_patch_C)
 _GLOBAL(hash_page_patch_C)
 	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
 	xori	r4,r4,(-PTEG_SIZE & 0xffff)
-	addi	r4,r4,-PTE_SIZE
+	addi	r4,r4,-HPTE_SIZE
 	mtctr	r0
-2:	LDPTEu	r6,PTE_SIZE(r4)
+2:	LDPTEu	r6,HPTE_SIZE(r4)
 	TST_V(r6)
 	bdnzf	2,2b
 	beq+	found_empty
@@ -409,11 +456,11 @@ _GLOBAL(hash_page_patch_C)
 
 1:	addis	r4,r7,next_slot@ha	/* get next evict slot */
 	lwz	r6,next_slot@l(r4)
-	addi	r6,r6,PTE_SIZE		/* search for candidate */
-	andi.	r6,r6,7*PTE_SIZE
+	addi	r6,r6,HPTE_SIZE		/* search for candidate */
+	andi.	r6,r6,7*HPTE_SIZE
 	stw	r6,next_slot@l(r4)
 	add	r4,r3,r6
-	LDPTE	r0,PTE_SIZE/2(r4)	/* get PTE second word */
+	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
 	clrrwi	r0,r0,12
 	lis	r6,etext@h
 	ori	r6,r6,etext@l		/* get etext */
@@ -426,7 +473,7 @@ _GLOBAL(hash_page_patch_C)
 found_empty:
 	STPTE	r5,0(r4)
 found_slot:
-	STPTE	r8,PTE_SIZE/2(r4)
+	STPTE	r8,HPTE_SIZE/2(r4)
 
 #else /* CONFIG_SMP */
 /*
@@ -452,7 +499,7 @@ found_slot:
 	STPTE	r5,0(r4)
 	sync
 	TLBSYNC
-	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
+	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
 	sync
 	SET_V(r5)
 	STPTE	r5,0(r4)	/* finally set V bit in PTE */
@@ -499,14 +546,18 @@ _GLOBAL(flush_hash_pages)
 	isync
 
 	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,22,20,29
-1:	lwz	r0,0(r5)
+#else
+	rlwimi	r5,r4,23,20,28
+#endif
+1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
 	ble	cr1,19f
 	addi	r4,r4,0x1000
-	addi	r5,r5,4
+	addi	r5,r5,PTE_SIZE
 	addi	r6,r6,-1
 	b	1b
 
@@ -545,7 +596,10 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte).  If not,
 	 * clear it (atomically) and proceed.  -- paulus.
 	 */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r5,r5,PTE_FLAGS_OFFSET
+#endif
-33:	lwarx	r8,0,r5			/* fetch the pte */
+33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f		/* done if HASHPTE is already clear */
 	rlwinm	r8,r8,0,31,29	/* clear HASHPTE bit */
@@ -562,8 +616,8 @@ _GLOBAL(flush_hash_patch_A)
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 	li	r0,8			/* PTEs/group */
 	mtctr	r0
-	addi	r12,r8,-PTE_SIZE
-1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
+	addi	r12,r8,-HPTE_SIZE
+1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
 	CMPPTE	0,r0,r11
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	3f
@@ -574,9 +628,9 @@ _GLOBAL(flush_hash_patch_A)
 _GLOBAL(flush_hash_patch_B)
 	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
 	xori	r12,r12,(-PTEG_SIZE & 0xffff)
-	addi	r12,r12,-PTE_SIZE
+	addi	r12,r12,-HPTE_SIZE
 	mtctr	r0
-2:	LDPTEu	r0,PTE_SIZE(r12)
+2:	LDPTEu	r0,HPTE_SIZE(r12)
 	CMPPTE	0,r0,r11
 	bdnzf	2,2b
 	xori	r11,r11,PTE_H		/* clear H again */
@@ -590,7 +644,7 @@ _GLOBAL(flush_hash_patch_B)
 
 8:	ble	cr1,9f			/* if all ptes checked */
 81:	addi	r6,r6,-1
-	addi	r5,r5,4			/* advance to next pte */
+	addi	r5,r5,PTE_SIZE
 	addi	r4,r4,0x1000
 	lwz	r0,0(r5)		/* check next pte */
 	cmpwi	cr1,r6,1
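
The rename separates two sizes that previously shared one macro: HPTE_SIZE is the hardware hash-table entry, which is always eight bytes on these 32-bit hash MMUs, while PTE_SIZE now refers to the Linux PTE, which grows to eight bytes under CONFIG_PTE_64BIT with the flag word at PTE_FLAGS_OFFSET. A compilable sketch of the relationship (the constants are illustrative; the real ones come from the powerpc page-table headers for a given configuration):

#include <stdio.h>

/* Illustrative only; CONFIG_PTE_64BIT is simply assumed here. */
#define CONFIG_PTE_64BIT	1

#define HPTE_SIZE	8			/* hardware hash PTE: always two 32-bit words */

#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;		/* 8-byte Linux PTE */
#define PTE_FLAGS_OFFSET	4		/* flag word is the low word (big-endian layout) */
#else
typedef unsigned long pte_basic_t;		/* 4-byte Linux PTE */
#define PTE_FLAGS_OFFSET	0
#endif

int main(void)
{
	printf("hash PTE: %d bytes, Linux PTE: %zu bytes, flags word at +%d\n",
	       HPTE_SIZE, sizeof(pte_basic_t), PTE_FLAGS_OFFSET);
	return 0;
}
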
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8920eea34528..5c64af174752 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -194,7 +194,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long tprot = prot;
 
 		/* Make kernel text executable */
-		if (in_kernel_text(vaddr))
+		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
 		hash = hpt_hash(va, shift, ssize);
@@ -348,6 +348,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
 /* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
@@ -385,6 +386,7 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	add_gpage(phys_addr, block_size, expected_pages);
 	return 0;
 }
+#endif /* CONFIG_HUGETLB_PAGE */
 
 static void __init htab_init_page_sizes(void)
 {
@@ -539,7 +541,7 @@ static unsigned long __init htab_get_table_size(void)
 void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-				 PAGE_KERNEL, mmu_linear_psize,
+				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
 				 mmu_kernel_ssize));
 }
 
@@ -647,7 +649,7 @@ void __init htab_initialize(void)
 		mtspr(SPRN_SDR1, _SDR1);
 	}
 
-	prot = PAGE_KERNEL;
+	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
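
The first hunk replaces a point test with a range test: the bolted mapping covers [vaddr, vaddr + step), so it should be made executable if any part of that range intersects kernel text, not only if its start address does. A user-space sketch of the predicate (the text bounds and addresses are invented; the real helper compares against the kernel's own section symbols):

#include <stdio.h>

static unsigned long text_start = 0xc2000000UL;	/* hypothetical text bounds */
static unsigned long text_end   = 0xc2600000UL;

static int in_kernel_text(unsigned long addr)
{
	return addr >= text_start && addr < text_end;
}

/* True if [start, end) intersects the kernel text at all. */
static int overlaps_kernel_text(unsigned long start, unsigned long end)
{
	return start < text_end && end > text_start;
}

int main(void)
{
	unsigned long step = 16UL << 20;	/* one 16 MB linear-mapping page */
	unsigned long vaddr = 0xc1800000UL;	/* page starts below the text... */

	/* ...but its 16 MB span reaches into it: only the range test notices. */
	printf("in_kernel_text=%d overlaps_kernel_text=%d\n",
	       in_kernel_text(vaddr), overlaps_kernel_text(vaddr, vaddr + step));
	return 0;
}
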
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f1c2d55b4377..a117024ab8cd 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -128,29 +128,37 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	return 0;
 }
 
-/* Base page size affects how we walk hugetlb page tables */
-#ifdef CONFIG_PPC_64K_PAGES
-#define hpmd_offset(pud, addr, h)    pmd_offset(pud, addr)
-#define hpmd_alloc(mm, pud, addr, h) pmd_alloc(mm, pud, addr)
-#else
-static inline
-pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
+
+static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
+{
+	if (huge_page_shift(hstate) < PUD_SHIFT)
+		return pud_offset(pgd, addr);
+	else
+		return (pud_t *) pgd;
+}
+static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
+			 struct hstate *hstate)
 {
-	if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
+	if (huge_page_shift(hstate) < PUD_SHIFT)
+		return pud_alloc(mm, pgd, addr);
+	else
+		return (pud_t *) pgd;
+}
+static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
+{
+	if (huge_page_shift(hstate) < PMD_SHIFT)
 		return pmd_offset(pud, addr);
 	else
 		return (pmd_t *) pud;
 }
-static inline
-pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
-		  struct hstate *hstate)
+static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
+			 struct hstate *hstate)
 {
-	if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
+	if (huge_page_shift(hstate) < PMD_SHIFT)
 		return pmd_alloc(mm, pud, addr);
 	else
 		return (pmd_t *) pud;
 }
-#endif
 
 /* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
@@ -204,7 +212,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 
 	pg = pgd_offset(mm, addr);
 	if (!pgd_none(*pg)) {
-		pu = pud_offset(pg, addr);
+		pu = hpud_offset(pg, addr, hstate);
 		if (!pud_none(*pu)) {
 			pm = hpmd_offset(pu, addr, hstate);
 			if (!pmd_none(*pm))
@@ -233,7 +241,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	addr &= hstate->mask;
 
 	pg = pgd_offset(mm, addr);
-	pu = pud_alloc(mm, pg, addr);
+	pu = hpud_alloc(mm, pg, addr, hstate);
 
 	if (pu) {
 		pm = hpmd_alloc(mm, pu, addr, hstate);
@@ -316,13 +324,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
-#ifdef CONFIG_PPC_64K_PAGES
-		if (pud_none_or_clear_bad(pud))
-			continue;
-		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling,
-				       psize);
-#else
-		if (shift == PAGE_SHIFT_64K) {
+		if (shift < PMD_SHIFT) {
 			if (pud_none_or_clear_bad(pud))
 				continue;
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
@@ -332,7 +334,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 				continue;
 			free_hugepte_range(tlb, (hugepd_t *)pud, psize);
 		}
-#endif
 	} while (pud++, addr = next, addr != end);
 
 	start &= PGDIR_MASK;
@@ -422,9 +423,15 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 		psize = get_slice_psize(tlb->mm, addr);
 		BUG_ON(!mmu_huge_psizes[psize]);
 		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+		if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
+			if (pgd_none_or_clear_bad(pgd))
+				continue;
+			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+		} else {
+			if (pgd_none(*pgd))
+				continue;
+			free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
+		}
 	} while (pgd++, addr = next, addr != end);
 }
 
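
The new hpud_*/hpmd_* helpers encode one rule: a huge page is anchored at the lowest page-table level whose span is at least as large as the page, and any level below that is skipped when walking or freeing. A standalone sketch of that level selection (the shift values are assumptions for illustration, not the kernel's):

#include <stdio.h>

#define PMD_SHIFT	24	/* assume each PMD entry spans 2^24 bytes */
#define PUD_SHIFT	34	/* assume each PUD entry spans 2^34 bytes */

static const char *hugepte_level(unsigned int huge_shift)
{
	if (huge_shift < PMD_SHIFT)
		return "hangs off a PMD (full PGD -> PUD -> PMD walk)";
	else if (huge_shift < PUD_SHIFT)
		return "replaces the PMD level (stop at the PUD)";
	else
		return "replaces the PUD level (stop at the PGD)";
}

int main(void)
{
	printf("64K huge page (shift 16): %s\n", hugepte_level(16));
	printf("16M huge page (shift 24): %s\n", hugepte_level(24));
	printf("16G huge page (shift 34): %s\n", hugepte_level(34));
	return 0;
}
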
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 036fe2f10c77..3e6a6543f53a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -228,8 +228,8 @@ int __meminit vmemmap_populate(struct page *start_page,
 		       start, p, __pa(p));
 
 		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
-					   PAGE_KERNEL, mmu_vmemmap_psize,
-					   mmu_kernel_ssize);
+					   pgprot_val(PAGE_KERNEL),
+					   mmu_vmemmap_psize, mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1c93c255873b..98d7bf99533a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -75,11 +75,10 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
 
 int page_is_ram(unsigned long pfn)
 {
-	unsigned long paddr = (pfn << PAGE_SHIFT);
-
 #ifndef CONFIG_PPC64	/* XXX for now */
-	return paddr < __pa(high_memory);
+	return pfn < max_pfn;
 #else
+	unsigned long paddr = (pfn << PAGE_SHIFT);
 	int i;
 	for (i=0; i < lmb.memory.cnt; i++) {
 		unsigned long base;
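
On 32-bit, page_is_ram() now bounds-checks the page frame number against max_pfn instead of reconstructing a physical address and comparing it with __pa(high_memory); among other things, that avoids forming a physical address that may not fit in an unsigned long. A small sketch of that truncation hazard (all numbers are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_shift = 12;
	uint32_t max_pfn = 0x180000;	/* 6 GB of RAM in 4 kB pages */
	uint32_t pfn = 0x140000;	/* a page just above the 4 GB mark */

	uint64_t full = (uint64_t)pfn << page_shift;	/* needs 34 bits */
	uint32_t truncated = (uint32_t)full;		/* what a 32-bit ulong would hold */

	printf("pfn %#x -> paddr %#llx truncates to %#x; pfn < max_pfn is %d\n",
	       pfn, (unsigned long long)full, truncated, pfn < max_pfn);
	return 0;
}
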
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index d9a181351332..6cf5c71c431f 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -89,6 +89,46 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
 	return 0;
 }
 
+/*
+ * get_active_region_work_fn - A helper function for get_node_active_region
+ *	Returns datax set to the start_pfn and end_pfn if they contain
+ *	the initial value of datax->start_pfn between them
+ * @start_pfn: start page(inclusive) of region to check
+ * @end_pfn: end page(exclusive) of region to check
+ * @datax: comes in with ->start_pfn set to value to search for and
+ *	goes out with active range if it contains it
+ * Returns 1 if search value is in range else 0
+ */
+static int __init get_active_region_work_fn(unsigned long start_pfn,
+					unsigned long end_pfn, void *datax)
+{
+	struct node_active_region *data;
+	data = (struct node_active_region *)datax;
+
+	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
+		data->start_pfn = start_pfn;
+		data->end_pfn = end_pfn;
+		return 1;
+	}
+	return 0;
+
+}
+
+/*
+ * get_node_active_region - Return active region containing start_pfn
+ * @start_pfn: The page to return the region for.
+ * @node_ar: Returned set to the active region containing start_pfn
+ */
+static void __init get_node_active_region(unsigned long start_pfn,
+		       struct node_active_region *node_ar)
+{
+	int nid = early_pfn_to_nid(start_pfn);
+
+	node_ar->nid = nid;
+	node_ar->start_pfn = start_pfn;
+	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+}
+
 static void __cpuinit map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
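
get_node_active_region() finds the active (present-memory) range containing a given pfn by walking the node's ranges through a callback that stops at the first hit. A self-contained model of that lookup with made-up ranges (the kernel drives the callback via work_with_active_regions()):

#include <stdio.h>

struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static const struct { unsigned long start, end; } active[] = {
	{ 0x00000, 0x20000 },	/* illustrative active pfn ranges */
	{ 0x40000, 0x48000 },
};

static int get_active_region_work_fn(unsigned long start_pfn,
				     unsigned long end_pfn, void *datax)
{
	struct node_active_region *data = datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;	/* found it: stop walking */
	}
	return 0;
}

int main(void)
{
	struct node_active_region ar = { .start_pfn = 0x41000, .nid = 0 };

	for (unsigned int i = 0; i < sizeof(active) / sizeof(active[0]); i++)
		if (get_active_region_work_fn(active[i].start, active[i].end, &ar))
			break;

	printf("pfn 0x41000 lies in active region [0x%lx, 0x%lx)\n",
	       ar.start_pfn, ar.end_pfn);
	return 0;
}
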
@@ -150,6 +190,21 @@ static const int *of_get_associativity(struct device_node *dev)
 	return of_get_property(dev, "ibm,associativity", NULL);
 }
 
+/*
+ * Returns the property linux,drconf-usable-memory if
+ * it exists (the property exists only in kexec/kdump kernels,
+ * added by kexec-tools)
+ */
+static const u32 *of_get_usable_memory(struct device_node *memory)
+{
+	const u32 *prop;
+	u32 len;
+	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
+	if (!prop || len < sizeof(unsigned int))
+		return 0;
+	return prop;
+}
+
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
@@ -487,14 +542,29 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 }
 
 /*
+ * Reads the counter for a given entry in
+ * linux,drconf-usable-memory property
+ */
+static inline int __init read_usm_ranges(const u32 **usm)
+{
+	/*
+	 * For each lmb in ibm,dynamic-memory a corresponding
+	 * entry in linux,drconf-usable-memory property contains
+	 * a counter followed by that many (base, size) duple.
+	 * read the counter from linux,drconf-usable-memory
+	 */
+	return read_n_cells(n_mem_size_cells, usm);
+}
+
+/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const u32 *dm;
-	unsigned int n, rc;
-	unsigned long lmb_size, size;
+	const u32 *dm, *usm;
+	unsigned int n, rc, ranges, is_kexec_kdump = 0;
+	unsigned long lmb_size, base, size, sz;
 	int nid;
 	struct assoc_arrays aa;
 
@@ -510,6 +580,11 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	if (rc)
 		return;
 
+	/* check if this is a kexec/kdump kernel */
+	usm = of_get_usable_memory(memory);
+	if (usm != NULL)
+		is_kexec_kdump = 1;
+
 	for (; n != 0; --n) {
 		struct of_drconf_cell drmem;
 
@@ -521,21 +596,31 @@ static void __init parse_drconf_memory(struct device_node *memory)
 		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
 			continue;
 
-		nid = of_drconf_to_nid_single(&drmem, &aa);
+		base = drmem.base_addr;
+		size = lmb_size;
+		ranges = 1;
 
-		fake_numa_create_new_node(
-			((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
+		if (is_kexec_kdump) {
+			ranges = read_usm_ranges(&usm);
+			if (!ranges) /* there are no (base, size) duple */
+				continue;
+		}
+		do {
+			if (is_kexec_kdump) {
+				base = read_n_cells(n_mem_addr_cells, &usm);
+				size = read_n_cells(n_mem_size_cells, &usm);
+			}
+			nid = of_drconf_to_nid_single(&drmem, &aa);
+			fake_numa_create_new_node(
+				((base + size) >> PAGE_SHIFT),
 					   &nid);
-
-		node_set_online(nid);
-
-		size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
-		if (!size)
-			continue;
-
-		add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
-				(drmem.base_addr >> PAGE_SHIFT)
-				+ (size >> PAGE_SHIFT));
+			node_set_online(nid);
+			sz = numa_enforce_memory_limit(base, size);
+			if (sz)
+				add_active_range(nid, base >> PAGE_SHIFT,
+						 (base >> PAGE_SHIFT)
+						 + (sz >> PAGE_SHIFT));
+		} while (--ranges);
 	}
 }
 
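
For kexec/kdump kernels the usable memory inside each lmb is described by linux,drconf-usable-memory: per lmb, a counter followed by that many (base, size) pairs, which the loop above replays through read_n_cells(). A simplified standalone parser of that layout (cells are collapsed to one 64-bit value per field and the property contents are invented):

#include <stdio.h>
#include <stdint.h>

static uint64_t read_cell(const uint64_t **usm)
{
	return *(*usm)++;
}

int main(void)
{
	/* one lmb with two usable sub-ranges (made-up numbers) */
	static const uint64_t prop[] = { 2, 0x10000000, 0x1000000,
					    0x30000000, 0x2000000 };
	const uint64_t *usm = prop;
	uint64_t ranges = read_cell(&usm);

	while (ranges--) {
		uint64_t base = read_cell(&usm);
		uint64_t size = read_cell(&usm);

		printf("usable: base=0x%llx size=0x%llx\n",
		       (unsigned long long)base, (unsigned long long)size);
	}
	return 0;
}
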
@@ -837,38 +922,50 @@ void __init do_init_bootmem(void)
 				  start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
+	}
 
-		/* Mark reserved regions on this node */
-		for (i = 0; i < lmb.reserved.cnt; i++) {
-			unsigned long physbase = lmb.reserved.region[i].base;
-			unsigned long size = lmb.reserved.region[i].size;
-			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
-			unsigned long end_paddr = end_pfn << PAGE_SHIFT;
-
-			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
-			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
-				continue;
-
-			if (physbase < end_paddr &&
-			    (physbase+size) > start_paddr) {
-				/* overlaps */
-				if (physbase < start_paddr) {
-					size -= start_paddr - physbase;
-					physbase = start_paddr;
-				}
-
-				if (size > end_paddr - physbase)
-					size = end_paddr - physbase;
-
-				dbg("reserve_bootmem %lx %lx\n", physbase,
-				    size);
-				reserve_bootmem_node(NODE_DATA(nid), physbase,
-						     size, BOOTMEM_DEFAULT);
-			}
+	/* Mark reserved regions */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long physbase = lmb.reserved.region[i].base;
+		unsigned long size = lmb.reserved.region[i].size;
+		unsigned long start_pfn = physbase >> PAGE_SHIFT;
+		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+		struct node_active_region node_ar;
+
+		get_node_active_region(start_pfn, &node_ar);
+		while (start_pfn < end_pfn) {
+			/*
+			 * if reserved region extends past active region
+			 * then trim size to active region
+			 */
+			if (end_pfn > node_ar.end_pfn)
+				size = (node_ar.end_pfn << PAGE_SHIFT)
+					- (start_pfn << PAGE_SHIFT);
+			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase, size,
+				node_ar.nid);
+			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
+						size, BOOTMEM_DEFAULT);
+			/*
+			 * if reserved region is contained in the active region
+			 * then done.
+			 */
+			if (end_pfn <= node_ar.end_pfn)
+				break;
+
+			/*
+			 * reserved region extends past the active region
+			 *   get next active region that contains this
+			 *   reserved region
+			 */
+			start_pfn = node_ar.end_pfn;
+			physbase = start_pfn << PAGE_SHIFT;
+			get_node_active_region(start_pfn, &node_ar);
 		}
 
-		sparse_memory_present_with_active_regions(nid);
 	}
+
+	for_each_online_node(nid)
+		sparse_memory_present_with_active_regions(nid);
 }
 
 void __init paging_init(void)
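
The reworked reservation loop walks a reserved range one active region at a time, trimming each piece to the region that owns it and reserving it against that node's bootmem, instead of skipping regions that cross node boundaries. A standalone model of the splitting (the region layout and the reserved range are made up):

#include <stdio.h>

struct region { unsigned long start, end; int nid; };

static const struct region active[] = {
	{ 0x0000, 0x4000, 0 },	/* illustrative per-node active pfn ranges */
	{ 0x4000, 0x8000, 1 },
};

static const struct region *containing(unsigned long pfn)
{
	for (unsigned int i = 0; i < sizeof(active) / sizeof(active[0]); i++)
		if (pfn >= active[i].start && pfn < active[i].end)
			return &active[i];
	return &active[0];
}

int main(void)
{
	unsigned long start = 0x3000, end = 0x6000;	/* reserved range crossing nodes */

	while (start < end) {
		const struct region *ar = containing(start);
		unsigned long piece_end = end < ar->end ? end : ar->end;

		printf("reserve [0x%lx, 0x%lx) on nid %d\n", start, piece_end, ar->nid);
		start = piece_end;	/* continue in the next active region */
	}
	return 0;
}
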
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 2001abdb1912..c31d6d26f0b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -73,7 +73,7 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
 #endif /* HAVE_TLBCAM */
 
 #ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
 #define PGDIR_ORDER	1
 #else
 #define PGDIR_ORDER	0
@@ -288,7 +288,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 }
 
 /*
- * Map in all of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at KERNELBASE.
 */
 void __init mapin_ram(void)
 {
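
The reworded comment reflects that any CONFIG_PTE_64BIT platform, not just 44x, needs the larger pgdir: with 8-byte PTEs a 4 kB page of PTEs maps only 2 MB, so the top level needs 2048 entries and spills into a second page. A quick back-of-the-envelope check (assuming 4 kB pages and 4-byte pgd entries):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long pte_size = 8;				/* CONFIG_PTE_64BIT */
	unsigned long ptes_per_page = page_size / pte_size;	/* 512 */
	unsigned long span_per_pgd_entry = ptes_per_page * page_size;	/* 2 MB */
	unsigned long long pgd_entries = (1ULL << 32) / span_per_pgd_entry;	/* 2048 */
	unsigned long long pgdir_bytes = pgd_entries * 4;	/* 4-byte pointers */

	printf("pgdir needs %llu bytes -> PGDIR_ORDER %d\n",
	       pgdir_bytes, pgdir_bytes > page_size ? 1 : 0);
	return 0;
}
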
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index eb4b512d65fa..f9a47fee3927 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -45,6 +45,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
+EXPORT_SYMBOL(flush_hash_entry);
 
 /*
 * Called by ptep_set_access_flags, must flush on CPUs for which the