author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 14:50:19 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 14:50:19 -0400
commit    df6d3916f3b7b7e2067567a256dd4f0c1ea854a2
tree      0fdeab1ab5d566605fc99aeb5ea3f621f11e7608 /arch/powerpc/mm
parent    74add80cbd7fe246c893b93ee75ac59acdd01dd4
parent    197686dfe0038fd190326d118b743ff65ad20c0e

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (77 commits)
  [POWERPC] Abolish powerpc_flash_init()
  [POWERPC] Early serial debug support for PPC44x
  [POWERPC] Support for the Ebony 440GP reference board in arch/powerpc
  [POWERPC] Add device tree for Ebony
  [POWERPC] Add powerpc/platforms/44x, disable platforms/4xx for now
  [POWERPC] MPIC U3/U4 MSI backend
  [POWERPC] MPIC MSI allocator
  [POWERPC] Enable MSI mappings for MPIC
  [POWERPC] Tell Phyp we support MSI
  [POWERPC] RTAS MSI implementation
  [POWERPC] PowerPC MSI infrastructure
  [POWERPC] Rip out the existing powerpc msi stubs
  [POWERPC] Remove use of 4level-fixup.h for ppc32
  [POWERPC] Add powerpc PCI-E reset API implementation
  [POWERPC] Holly bootwrapper
  [POWERPC] Holly DTS
  [POWERPC] Holly defconfig
  [POWERPC] Add support for 750CL Holly board
  [POWERPC] Generalize tsi108 PCI setup
  [POWERPC] Generalize tsi108 PHY types
  ...

Fixed conflict in include/asm-powerpc/kdebug.h manually

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/44x_mmu.c          82
-rw-r--r--  arch/powerpc/mm/fault.c            42
-rw-r--r--  arch/powerpc/mm/hash_native_64.c   84
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c     2
-rw-r--r--  arch/powerpc/mm/mem.c               1
-rw-r--r--  arch/powerpc/mm/mmu_decl.h          3
-rw-r--r--  arch/powerpc/mm/pgtable_32.c       28
-rw-r--r--  arch/powerpc/mm/stab.c              2
8 files changed, 116 insertions(+), 128 deletions(-)

diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 0a0a0487b334..ca4dcb07a939 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,73 +24,38 @@
  *
  */
 
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/highmem.h>
-
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/uaccess.h>
-#include <asm/smp.h>
-#include <asm/bootx.h>
-#include <asm/machdep.h>
-#include <asm/setup.h>
+#include <asm/system.h>
+#include <asm/page.h>
 
 #include "mmu_decl.h"
 
-extern char etext[], _stext[];
-
 /* Used by the 44x TLB replacement exception handler.
  * Just needed it declared someplace.
  */
-unsigned int tlb_44x_index = 0;
-unsigned int tlb_44x_hwater = 62;
+unsigned int tlb_44x_index; /* = 0 */
+unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
 
 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
  */
-static void __init
-ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
+static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
-	unsigned long attrib = 0;
-
-	__asm__ __volatile__("\
-	clrrwi	%2,%2,10\n\
-	ori	%2,%2,%4\n\
-	clrrwi	%1,%1,10\n\
-	li	%0,0\n\
-	ori	%0,%0,%5\n\
-	tlbwe	%2,%3,%6\n\
-	tlbwe	%1,%3,%7\n\
-	tlbwe	%0,%3,%8"
+	__asm__ __volatile__(
+		"tlbwe	%2,%3,%4\n"
+		"tlbwe	%1,%3,%5\n"
+		"tlbwe	%0,%3,%6\n"
 	:
-	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
-	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
-	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	  "r" (phys),
+	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
+	  "r" (tlb_44x_hwater--), /* slot for this TLB entry */
 	  "i" (PPC44x_TLB_PAGEID),
 	  "i" (PPC44x_TLB_XLAT),
 	  "i" (PPC44x_TLB_ATTRIB));
 }
 
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
 void __init MMU_init_hw(void)
 {
 	flush_instruction_cache();
@@ -98,22 +63,13 @@ void __init MMU_init_hw(void)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned int pinned_tlbs = 1;
-	int i;
-
-	/* Determine number of entries necessary to cover lowmem */
-	pinned_tlbs = (unsigned int)
-		(_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
-
-	/* Write upper watermark to save location */
-	tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
+	unsigned long addr;
 
-	/* If necessary, set additional pinned TLBs */
-	if (pinned_tlbs > 1)
-		for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
-			unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
-			ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
-		}
+	/* Pin in enough TLBs to cover any lowmem not covered by the
+	 * initial 256M mapping established in head_44x.S */
+	for (addr = PPC_PIN_SIZE; addr < total_lowmem;
+	     addr += PPC_PIN_SIZE)
+		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
 
 	return total_lowmem;
 }
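
The rewritten mmu_mapin_ram() drops the slot arithmetic entirely: lowmem is walked in PPC_PIN_SIZE (256MB) strides, skipping the first stride that head_44x.S already mapped, while ppc44x_pin_tlb() hands out slots by decrementing tlb_44x_hwater from the top of the TLB. A minimal userspace sketch of the resulting coverage, with assumed values standing in for PAGE_OFFSET, PPC44x_TLB_SIZE, PPC44x_EARLY_TLBS and the lowmem size:

#include <stdio.h>

#define PPC_PIN_SIZE		(256UL << 20)	/* 256MB per pinned entry */
#define PPC44x_TLB_SIZE		64		/* assumed TLB size */
#define PPC44x_EARLY_TLBS	1		/* entries set up by head_44x.S */
#define PAGE_OFFSET		0xc0000000UL	/* assumed kernel virtual base */

int main(void)
{
	unsigned long total_lowmem = 768UL << 20;	/* example: 768MB */
	unsigned int hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
	unsigned long addr;

	/* Same shape as the new loop: the first 256MB is already mapped
	 * by head_44x.S, so pinning starts at PPC_PIN_SIZE and slots
	 * count down from the watermark. */
	for (addr = PPC_PIN_SIZE; addr < total_lowmem; addr += PPC_PIN_SIZE)
		printf("pin slot %u: virt 0x%08lx -> phys 0x%08lx\n",
		       hwater--, PAGE_OFFSET + addr, addr);
	return 0;
}

With 768MB of lowmem this pins two extra entries (for the 256MB and 512MB strides) into slots 62 and 61, matching the new build-time starting point of tlb_44x_hwater.
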
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bec0cce79a78..bfe901353142 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -39,37 +39,26 @@
 #include <asm/tlbflush.h>
 #include <asm/siginfo.h>
 
-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
 
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-
-int unregister_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 11))
+			ret = 1;
+		preempt_enable();
+	}
 
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return ret;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	return NOTIFY_DONE;
+	return 0;
 }
 #endif
 
@@ -175,8 +164,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
-	if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
-				11, SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return 0;
 
 	if (trap == 0x300) {
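
The old path pushed every page fault through an atomic notifier chain even when no kprobe was registered; the replacement is a direct inline check that costs almost nothing in the common case. The preempt_disable()/preempt_enable() pair exists because kprobe_running() reads per-CPU state through smp_processor_id(), which is only stable while the task cannot migrate. A self-contained sketch of the same control flow, with userspace stubs standing in for the kernel primitives:

#include <stdio.h>

/* Stubs standing in for the kernel APIs used above; values are fake. */
struct pt_regs { int user; };
static int probe_active = 1;
static int user_mode(struct pt_regs *regs) { return regs->user; }
static void preempt_disable(void) { }
static void preempt_enable(void) { }
static int kprobe_running(void) { return probe_active; }
static int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	(void)regs;
	(void)trapnr;
	return 1;	/* pretend the probe handled the fault */
}

/* Same shape as the new notify_page_fault(): only kernel-mode faults
 * can belong to a kprobe, and the check runs with preemption off. */
static int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

int main(void)
{
	struct pt_regs kernel_fault = { 0 }, user_fault = { 1 };

	printf("kernel fault -> %d\n", notify_page_fault(&kernel_fault));
	printf("user fault   -> %d\n", notify_page_fault(&user_fault));
	return 0;
}

do_page_fault() then bails out early when the handler claims the fault, exactly as the old NOTIFY_STOP result did.
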
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 79aedaf36f2b..7b7fe2d7b9dc 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -26,6 +26,7 @@
 #include <asm/tlb.h>
 #include <asm/cputable.h>
 #include <asm/udbg.h>
+#include <asm/kexec.h>
 
 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -340,31 +341,70 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 	local_irq_restore(flags);
 }
 
-/*
- * XXX This need fixing based on page size. It's only used by
- * native_hpte_clear() for now which needs fixing too so they
- * make a good pair...
- */
-static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
-{
-	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
-	unsigned long va;
-
-	va = avpn << 23;
-
-	if (! (hpte_v & HPTE_V_LARGE)) {
-		unsigned long vpi, pteg;
-
-		pteg = slot / HPTES_PER_GROUP;
-		if (hpte_v & HPTE_V_SECONDARY)
-			pteg = ~pteg;
-
-		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
-
-		va |= vpi << PAGE_SHIFT;
-	}
-
-	return va;
-}
+#define LP_SHIFT	12
+#define LP_BITS		8
+#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
+
+static void hpte_decode(hpte_t *hpte, unsigned long slot,
+			int *psize, unsigned long *va)
+{
+	unsigned long hpte_r = hpte->r;
+	unsigned long hpte_v = hpte->v;
+	unsigned long avpn;
+	int i, size, shift, penc, avpnm_bits;
+
+	if (!(hpte_v & HPTE_V_LARGE))
+		size = MMU_PAGE_4K;
+	else {
+		for (i = 0; i < LP_BITS; i++) {
+			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
+				break;
+		}
+		penc = LP_MASK(i+1) >> LP_SHIFT;
+		for (size = 0; size < MMU_PAGE_COUNT; size++) {
+
+			/* 4K pages are not represented by LP */
+			if (size == MMU_PAGE_4K)
+				continue;
+
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[size].shift)
+				continue;
+
+			if (penc == mmu_psize_defs[size].penc)
+				break;
+		}
+	}
+
+	/*
+	 * FIXME, the code below works for 16M, 64K, and 4K pages as these
+	 * fall under the p<=23 rules for calculating the virtual address.
+	 * In the case of 16M pages, an extra bit is stolen from the AVPN
+	 * field to achieve the requisite 24 bits.
+	 *
+	 * Does not work for 16G pages or 1 TB segments.
+	 */
+	shift = mmu_psize_defs[size].shift;
+	if (mmu_psize_defs[size].avpnm)
+		avpnm_bits = __ilog2_u64(mmu_psize_defs[size].avpnm) + 1;
+	else
+		avpnm_bits = 0;
+	if (shift - avpnm_bits <= 23) {
+		avpn = HPTE_V_AVPN_VAL(hpte_v) << 23;
+
+		if (shift < 23) {
+			unsigned long vpi, pteg;
+
+			pteg = slot / HPTES_PER_GROUP;
+			if (hpte_v & HPTE_V_SECONDARY)
+				pteg = ~pteg;
+			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
+			avpn |= (vpi << mmu_psize_defs[size].shift);
+		}
+	}
+
+	*va = avpn;
+	*psize = size;
+}
 
 /*
@@ -374,15 +414,14 @@ static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
  *
  * TODO: add batching support when enabled. remember, no dynamic memory here,
  * athough there is the control page available...
- *
- * XXX FIXME: 4k only for now !
  */
 static void native_hpte_clear(void)
 {
 	unsigned long slot, slots, flags;
 	hpte_t *hptep = htab_address;
-	unsigned long hpte_v;
+	unsigned long hpte_v, va;
 	unsigned long pteg_count;
+	int psize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -408,8 +447,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
+			hpte_decode(hptep, slot, &psize, &va);
 			hptep->v = 0;
-			__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
+			__tlbie(va, psize);
 		}
 	}
 
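
hpte_decode() recovers the page size from the LP field (bits 12 and up of the HPTE r-word): it looks for the shortest mask of low LP bits that are all ones, and that all-ones value is the per-size encoding (penc) matched against mmu_psize_defs[]. A standalone sketch of just the LP scan; the sample r-word values are made up for illustration:

#include <stdio.h>

#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFFUL >> (i)) << LP_SHIFT)

/* Mirrors the loop in hpte_decode(): find the first i for which the
 * masked LP bits are all ones; those bits are the penc value. */
static unsigned int lp_to_penc(unsigned long hpte_r)
{
	int i;

	for (i = 0; i < LP_BITS; i++) {
		if ((hpte_r & LP_MASK(i + 1)) == LP_MASK(i + 1))
			break;
	}
	return LP_MASK(i + 1) >> LP_SHIFT;
}

int main(void)
{
	/* Hypothetical r-word with LP = 0x01: decodes to penc 0x1. */
	printf("penc = 0x%x\n", lp_to_penc(0x1000UL));
	/* Hypothetical r-word with LP = 0x07: decodes to penc 0x7. */
	printf("penc = 0x%x\n", lp_to_penc(0x7000UL));
	return 0;
}

In the kernel code the resulting penc is compared against mmu_psize_defs[size].penc for each size that defines a shift, and the winning size then drives the p <= 23 virtual-address reconstruction described in the FIXME comment above.
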
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 49618461defb..9b226fa7006f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -103,7 +103,7 @@ int mmu_ci_restrictions;
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static spinlock_t linear_map_hash_lock;
+static DEFINE_SPINLOCK(linear_map_hash_lock);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /* There are definitions of page sizes arrays to be used when none
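
This one-liner is a correctness fix rather than style: a bare static spinlock_t is merely zero-filled, which is not a properly initialized lock on every configuration (spinlock debugging, for instance, expects its magic and owner fields to be set up). DEFINE_SPINLOCK() emits the definition together with its static initializer. A kernel-style fragment contrasting the two valid idioms (the runtime variant and its init hook are hypothetical, shown only for comparison):

/* Build-time initialization, which is what the patch switches to: */
static DEFINE_SPINLOCK(linear_map_hash_lock);

/* The equivalent runtime form, had there been a convenient init path: */
static spinlock_t other_lock;			/* hypothetical */

static void __init other_lock_setup(void)	/* hypothetical */
{
	spin_lock_init(&other_lock);
}
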
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c4bcd7546424..1a6e08f3298f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -80,7 +80,6 @@ int page_is_ram(unsigned long pfn)
 	return 0;
 #endif
 }
-EXPORT_SYMBOL(page_is_ram);
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 9c4538bb04b0..2558c34eedaa 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -40,7 +40,8 @@ extern int __map_without_bats;
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;
 
-extern PTE *Hash, *Hash_end;
+struct _PTE;
+extern struct _PTE *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
 
 extern unsigned int num_tlbcam_entries;
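
mmu_decl.h previously needed the full PTE typedef in scope just to declare two pointers. A forward declaration of struct _PTE leaves the type incomplete but still perfectly usable for pointer declarations, trimming the header dependency. A standalone illustration of the idiom; everything beyond the struct _PTE name (layout, table, main) is hypothetical:

/* In a header: an incomplete type is enough for pointer declarations. */
struct _PTE;
extern struct _PTE *Hash, *Hash_end;

/* In one .c file: the full definition and the actual storage. */
struct _PTE { unsigned long v, r; };	/* made-up layout */
static struct _PTE table[16];
struct _PTE *Hash = &table[0], *Hash_end = &table[16];

int main(void) { return Hash_end - Hash == 16 ? 0 : 1; }
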
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bca560374927..d8232b7a08f7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -261,7 +261,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pgd_offset_k(va), va);
+	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	pg = pte_alloc_kernel(pd, va);
 	if (pg != 0) {
@@ -354,23 +354,27 @@ int
 get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
+	pud_t	*pud;
 	pmd_t	*pmd;
 	pte_t	*pte;
 	int     retval = 0;
 
 	pgd = pgd_offset(mm, addr & PAGE_MASK);
 	if (pgd) {
-		pmd = pmd_offset(pgd, addr & PAGE_MASK);
-		if (pmd_present(*pmd)) {
-			pte = pte_offset_map(pmd, addr & PAGE_MASK);
-			if (pte) {
-				retval = 1;
-				*ptep = pte;
-				if (pmdp)
-					*pmdp = pmd;
-				/* XXX caller needs to do pte_unmap, yuck */
-			}
-		}
+		pud = pud_offset(pgd, addr & PAGE_MASK);
+		if (pud && pud_present(*pud)) {
+			pmd = pmd_offset(pud, addr & PAGE_MASK);
+			if (pmd_present(*pmd)) {
+				pte = pte_offset_map(pmd, addr & PAGE_MASK);
+				if (pte) {
+					retval = 1;
+					*ptep = pte;
+					if (pmdp)
+						*pmdp = pmd;
+					/* XXX caller needs to do pte_unmap, yuck */
+				}
+			}
+		}
 	}
 	return(retval);
 }
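
With 4level-fixup.h gone, ppc32 goes through the generic four-level page table API, so every walk gains an explicit pud step; on two-level hardware the pud and pmd levels are folded away and pud_offset()/pmd_offset() collapse to near no-ops at compile time. The descent pattern the new get_pteptr() follows, as a kernel-context sketch (this helper is hypothetical and would only compile in-tree):

/* Hypothetical helper following the same pgd -> pud -> pmd -> pte
 * descent as get_pteptr() above; returns NULL if any level is absent. */
static pte_t *walk_kernel_ptes(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr & PAGE_MASK);
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd)
		return NULL;
	pud = pud_offset(pgd, addr & PAGE_MASK);   /* folded on 2-level */
	if (!pud || !pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr & PAGE_MASK);   /* folded likewise */
	if (!pmd_present(*pmd))
		return NULL;
	/* caller must pte_unmap() the result, as the XXX comment notes */
	return pte_offset_map(pmd, addr & PAGE_MASK);
}
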
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index eeeacab548e6..132c6bc66ce1 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -227,7 +227,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
  * the first (bolted) segment, so that do_stab_bolted won't get a
  * recursive segment miss on the segment table itself.
  */
-void stabs_alloc(void)
+void __init stabs_alloc(void)
 {
 	int cpu;
 