Diffstat (limited to 'arch/powerpc/mm')
 -rw-r--r--  arch/powerpc/mm/40x_mmu.c             |  2
 -rw-r--r--  arch/powerpc/mm/44x_mmu.c             |  2
 -rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c       |  2
 -rw-r--r--  arch/powerpc/mm/hash_utils_64.c       | 12
 -rw-r--r--  arch/powerpc/mm/init_32.c             |  9
 -rw-r--r--  arch/powerpc/mm/mmap_64.c             |  4
 -rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  |  2
 -rw-r--r--  arch/powerpc/mm/mmu_decl.h            | 17
 -rw-r--r--  arch/powerpc/mm/pgtable_32.c          | 38
 -rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c          |  4
 -rw-r--r--  arch/powerpc/mm/tlb_hash64.c          | 12
 11 files changed, 77 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index f5e7b9ce63dd..08dfa8e6d86f 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -91,7 +91,7 @@ void __init MMU_init_hw(void)
 #define LARGE_PAGE_SIZE_16M     (1<<24)
 #define LARGE_PAGE_SIZE_4M      (1<<22)
 
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
 {
         unsigned long v, s, mapped;
         phys_addr_t p;
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 98052ac96580..3986264b0993 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -88,7 +88,7 @@ void __init MMU_init_hw(void)
         flush_instruction_cache();
 }
 
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
 {
         unsigned long addr;
 
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index fcfcb6e976c7..c5394728bf2e 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -207,7 +207,7 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
         return amount_mapped;
 }
 
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
 {
         return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 50f867d657df..3ecdcec0a39e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -340,7 +340,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
                 else
                         def->tlbiel = 0;
 
-                DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
+                DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
                     "tlbiel=%d, penc=%d\n",
                     idx, shift, def->sllp, def->avpnm, def->tlbiel,
                     def->penc);
@@ -663,7 +663,7 @@ static void __init htab_initialize(void)
                 base = (unsigned long)__va(lmb.memory.region[i].base);
                 size = lmb.memory.region[i].size;
 
-                DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
+                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                     base, size, prot);
 
 #ifdef CONFIG_U3_DART
@@ -879,7 +879,7 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
-        void *pgdir;
+        pgd_t *pgdir;
         unsigned long vsid;
         struct mm_struct *mm;
         pte_t *ptep;
@@ -1025,7 +1025,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
         {
                 int spp = subpage_protection(pgdir, ea);
-                int spp = subpage_protection(pgdir, ea);
+                int spp = subpage_protection(mm, ea);
                 if (access & spp)
                         rc = -2;
                 else
@@ -1115,7 +1115,7 @@ void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
 {
         unsigned long hash, index, shift, hidx, slot;
 
-        DBG_LOW("flush_hash_page(va=%016x)\n", va);
+        DBG_LOW("flush_hash_page(va=%016lx)\n", va);
         pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                 hash = hpt_hash(va, shift, ssize);
                 hidx = __rpte_to_hidx(pte, index);
@@ -1123,7 +1123,7 @@ void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
                         hash = ~hash;
                 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                 slot += hidx & _PTEIDX_GROUP_IX;
-                DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
+                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
                 ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
         } pte_iterate_hashed_end();
 }
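
Aside from the pgdir/mm fix, the hash_utils_64.c hunks only correct DBG() format strings: sllp, avpnm, va, index, slot and hidx are all unsigned long, so on a 64-bit build they need the l length modifier. A minimal standalone illustration (not kernel code) of why the original specifiers trip -Wformat:

    #include <stdio.h>

    int main(void)
    {
            unsigned long slot = 0xdeadbeefUL;

            /* printf("hash=%x\n", slot);  -- warns: %x expects unsigned int */
            printf("hash=%lx\n", slot);    /* %lx matches unsigned long */
            return 0;
    }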
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 9ddcfb4dc139..4ec900af332f 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -82,6 +82,11 @@ extern struct task_struct *current_set[NR_CPUS];
 int __map_without_bats;
 int __map_without_ltlbs;
 
+/*
+ * This tells the system to allow ioremapping memory marked as reserved.
+ */
+int __allow_ioremap_reserved;
+
 /* max amount of low RAM to map in */
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
@@ -131,9 +136,13 @@ void __init MMU_init(void)
         MMU_setup();
 
         if (lmb.memory.cnt > 1) {
+#ifndef CONFIG_WII
                 lmb.memory.cnt = 1;
                 lmb_analyze();
                 printk(KERN_WARNING "Only using first contiguous memory region");
+#else
+                wii_memory_fixups();
+#endif
         }
 
         total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
index 0d957a4c70fe..5a783d8e8e8e 100644
--- a/arch/powerpc/mm/mmap_64.c
+++ b/arch/powerpc/mm/mmap_64.c
@@ -47,7 +47,7 @@ static inline int mmap_is_legacy(void)
         if (current->personality & ADDR_COMPAT_LAYOUT)
                 return 1;
 
-        if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
+        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                 return 1;
 
         return sysctl_legacy_va_layout;
@@ -77,7 +77,7 @@ static unsigned long mmap_rnd(void)
 
 static inline unsigned long mmap_base(void)
 {
-        unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+        unsigned long gap = rlimit(RLIMIT_STACK);
 
         if (gap < MIN_GAP)
                 gap = MIN_GAP;
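
The two mmap_64.c hunks swap the open-coded current->signal->rlim[RLIMIT_STACK].rlim_cur access for the rlimit() helper without changing behaviour. As a rough userspace analogue of the same layout decision, with getrlimit(2) standing in for the in-kernel helper:

    #include <stdio.h>
    #include <sys/resource.h>

    /* stand-in for the kernel's rlimit(RLIMIT_STACK) */
    static rlim_t stack_rlimit(void)
    {
            struct rlimit r;

            return getrlimit(RLIMIT_STACK, &r) ? RLIM_INFINITY : r.rlim_cur;
    }

    int main(void)
    {
            rlim_t gap = stack_rlimit();

            /* an unlimited stack forces the legacy bottom-up mmap layout */
            printf("legacy layout: %s\n",
                   gap == RLIM_INFINITY ? "yes" : "no");
            if (gap != RLIM_INFINITY)
                    printf("stack gap: %llu bytes\n",
                           (unsigned long long)gap);
            return 0;
    }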
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index be4f34c30a0b..1044a634b6d0 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -353,7 +353,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                 read_lock(&tasklist_lock);
                 for_each_process(p) {
                         if (p->mm)
-                                cpu_mask_clear_cpu(cpu, mm_cpumask(p->mm));
+                                cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
                 }
                 read_unlock(&tasklist_lock);
                 break;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index e27a990af42d..d49a77503e19 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -104,6 +104,7 @@ extern void setbat(int index, unsigned long virt, phys_addr_t phys,
                    unsigned int size, int flags);
 
 extern int __map_without_bats;
+extern int __allow_ioremap_reserved;
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;
 
@@ -125,24 +126,32 @@ extern phys_addr_t total_lowmem;
 extern phys_addr_t memstart_addr;
 extern phys_addr_t lowmem_end_addr;
 
+#ifdef CONFIG_WII
+extern unsigned long wii_hole_start;
+extern unsigned long wii_hole_size;
+
+extern unsigned long wii_mmu_mapin_mem2(unsigned long top);
+extern void wii_memory_fixups(void);
+#endif
+
 /* ...and now those things that may be slightly different between processor
  * architectures. -- Dan
  */
 #if defined(CONFIG_8xx)
 #define MMU_init_hw()           do { } while(0)
-#define mmu_mapin_ram()         (0UL)
+#define mmu_mapin_ram(top)      (0UL)
 
 #elif defined(CONFIG_4xx)
 extern void MMU_init_hw(void);
-extern unsigned long mmu_mapin_ram(void);
+extern unsigned long mmu_mapin_ram(unsigned long top);
 
 #elif defined(CONFIG_FSL_BOOKE)
 extern void MMU_init_hw(void);
-extern unsigned long mmu_mapin_ram(void);
+extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void adjust_total_lowmem(void);
 
 #elif defined(CONFIG_PPC32)
 /* anything 32-bit except 4xx or 8xx */
 extern void MMU_init_hw(void);
-extern unsigned long mmu_mapin_ram(void);
+extern unsigned long mmu_mapin_ram(unsigned long top);
 #endif
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cb96cb2e17cc..573b3bd1c45b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/lmb.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -191,7 +192,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
          * Don't allow anybody to remap normal RAM that we're using.
          * mem_init() sets high_memory so only do the check after that.
          */
-        if (mem_init_done && (p < virt_to_phys(high_memory))) {
+        if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+            !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
                 printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
                        (unsigned long long)p, __builtin_return_address(0));
                 return NULL;
@@ -283,18 +285,18 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 }
 
 /*
- * Map in a big chunk of physical memory starting at PAGE_OFFSET.
+ * Map in a chunk of physical memory starting at start.
  */
-void __init mapin_ram(void)
+void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
         unsigned long v, s, f;
         phys_addr_t p;
         int ktext;
 
-        s = mmu_mapin_ram();
+        s = offset;
         v = PAGE_OFFSET + s;
         p = memstart_addr + s;
-        for (; s < total_lowmem; s += PAGE_SIZE) {
+        for (; s < top; s += PAGE_SIZE) {
                 ktext = ((char *) v >= _stext && (char *) v < etext);
                 f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
                 map_page(v, p, f);
@@ -307,6 +309,30 @@ void __init mapin_ram(void)
         }
 }
 
+void __init mapin_ram(void)
+{
+        unsigned long s, top;
+
+#ifndef CONFIG_WII
+        top = total_lowmem;
+        s = mmu_mapin_ram(top);
+        __mapin_ram_chunk(s, top);
+#else
+        if (!wii_hole_size) {
+                s = mmu_mapin_ram(total_lowmem);
+                __mapin_ram_chunk(s, total_lowmem);
+        } else {
+                top = wii_hole_start;
+                s = mmu_mapin_ram(top);
+                __mapin_ram_chunk(s, top);
+
+                top = lmb_end_of_DRAM();
+                s = wii_mmu_mapin_mem2(top);
+                __mapin_ram_chunk(s, top);
+        }
+#endif
+}
+
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise.  The pointer to
@@ -356,7 +382,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
                 return 0;
         if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
                 return -EINVAL;
-        set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+        __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
         wmb();
 #ifdef CONFIG_PPC_STD_MMU
         flush_hash_pages(0, address, pmd_val(*kpmd), 1);
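
The pgtable_32.c change is the core of the series: the old mapin_ram() body becomes __mapin_ram_chunk(offset, top), and the new mapin_ram() calls it once per usable region, which is what lets a Wii kernel skip the hole between MEM1 and MEM2. A simplified standalone sketch of that control flow, with made-up addresses and a printf in place of map_page() (the real code starts each chunk wherever the BAT/TLB pre-mapping left off, not necessarily at the hole boundaries):

    #include <stdio.h>

    /* illustrative layout, not real Wii addresses */
    static unsigned long hole_start = 0x01800000;  /* start of unusable gap */
    static unsigned long hole_size  = 0x00800000;  /* size of unusable gap  */
    static unsigned long ram_end    = 0x04000000;  /* end of DRAM           */

    /* stand-in for __mapin_ram_chunk(): map [offset, top) page by page */
    static void mapin_chunk(unsigned long offset, unsigned long top)
    {
            printf("mapping %#010lx..%#010lx\n", offset, top);
    }

    int main(void)
    {
            if (!hole_size) {
                    mapin_chunk(0, ram_end);            /* one contiguous chunk */
            } else {
                    mapin_chunk(0, hole_start);         /* MEM1, below the hole */
                    mapin_chunk(hole_start + hole_size, /* MEM2, above the hole */
                                ram_end);
            }
            return 0;
    }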
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 2d2a87e10154..f11c2cdcb0fe 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -72,7 +72,7 @@ unsigned long p_mapped_by_bats(phys_addr_t pa)
         return 0;
 }
 
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
 {
         unsigned long tot, bl, done;
         unsigned long max_size = (256<<20);
@@ -86,7 +86,7 @@ unsigned long __init mmu_mapin_ram(void)
 
         /* Make sure we don't map a block larger than the
            smallest alignment of the physical address. */
-        tot = total_lowmem;
+        tot = top;
         for (bl = 128<<10; bl < max_size; bl <<= 1) {
                 if (bl * 2 > tot)
                         break;
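
In ppc_mmu_32.c the only functional change is that the BAT sizing now starts from the caller-supplied top rather than total_lowmem. The surrounding loop picks the largest power-of-two BAT block (128 KB up to 256 MB) that still fits; a small userspace sketch of just that selection, with illustrative sizes:

    #include <stdio.h>

    /* Pick the largest power-of-two block size (128 KB .. 256 MB) such that
     * doubling it would exceed the amount of memory to map.  Mirrors the
     * sizing loop in mmu_mapin_ram(); the sample values are illustrative. */
    static unsigned long pick_block_size(unsigned long top)
    {
            unsigned long max_size = 256UL << 20;
            unsigned long bl;

            for (bl = 128UL << 10; bl < max_size; bl <<= 1) {
                    if (bl * 2 > top)
                            break;
            }
            return bl;
    }

    int main(void)
    {
            unsigned long tops[] = { 16UL << 20, 48UL << 20, 768UL << 20 };
            unsigned int i;

            for (i = 0; i < sizeof(tops) / sizeof(tops[0]); i++)
                    printf("top %4lu MB -> BAT block %4lu MB\n",
                           tops[i] >> 20, pick_block_size(tops[i]) >> 20);
            return 0;
    }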
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f..1ec06576f619 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
                 psize = get_slice_psize(mm, addr);
+                /* Mask the address for the correct page size */
+                addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
                 BUG();
                 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-        } else
+        } else {
                 psize = pte_pagesize_index(mm, addr, pte);
+                /* Mask the address for the standard page size.  If we
+                 * have a 64k page kernel, but the hardware does not
+                 * support 64k pages, this might be different from the
+                 * hardware page size encoded in the slice table. */
+                addr &= PAGE_MASK;
+        }
 
-        /* Mask the address for the correct page size */
-        addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 
         /* Build full vaddr */
         if (!is_kernel_addr(addr)) {
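
The tlb_hash64.c change moves the address masking into the two branches: huge pages keep masking by the slice's page-size shift, while normal PTEs now just use PAGE_MASK, since a 64K-page kernel running on hardware without 64K support would otherwise mask with the wrong size. A small sketch of the two maskings, assuming a 64K kernel page size for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT 16                             /* assumed 64K pages */
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    /* mask an address down to the start of a page of the given shift */
    static unsigned long mask_to_shift(unsigned long addr, unsigned int shift)
    {
            return addr & ~((1UL << shift) - 1);
    }

    int main(void)
    {
            unsigned long addr = 0x12345678UL;

            printf("huge (16M page): %#lx\n", mask_to_shift(addr, 24));
            printf("normal page    : %#lx\n", addr & PAGE_MASK);
            return 0;
    }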