Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/gup.c                 10
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c          4
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  17
-rw-r--r--  arch/powerpc/mm/pgtable.c              4
-rw-r--r--  arch/powerpc/mm/slb.c                 13
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c           2
6 files changed, 22 insertions, 28 deletions
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index bc400c78c97f..bc122a120bf0 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -159,7 +159,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	int psize;
 #endif
 
-	pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
+	pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
 
 	start &= PAGE_MASK;
 	addr = start;
@@ -170,7 +170,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 					start, len)))
 		goto slow_irqon;
 
-	pr_debug(" aligned: %lx .. %lx\n", start, end);
+	pr_devel(" aligned: %lx .. %lx\n", start, end);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* We bail out on slice boundary crossing when hugetlb is
@@ -234,7 +234,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		do {
 			VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
 			ptep = huge_pte_offset(mm, a);
-			pr_debug(" %016lx: huge ptep %p\n", a, ptep);
+			pr_devel(" %016lx: huge ptep %p\n", a, ptep);
 			if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
 						   &nr))
 				goto slow;
@@ -249,7 +249,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 #ifdef CONFIG_PPC64
 		VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
 #endif
-		pr_debug(" %016lx: normal pgd %p\n", addr,
+		pr_devel(" %016lx: normal pgd %p\n", addr,
 			 (void *)pgd_val(pgd));
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
@@ -269,7 +269,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 slow:
 		local_irq_enable();
 slow_irqon:
-		pr_debug(" slow path ! nr = %d\n", nr);
+		pr_devel(" slow path ! nr = %d\n", nr);
 
 		/* Try to get the remaining pages with get_user_pages */
 		start += nr << PAGE_SHIFT;
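
Note: every pr_debug() in the fast GUP path above becomes pr_devel(). pr_debug() turns into a dynamic-debug call site when CONFIG_DYNAMIC_DEBUG is enabled, so it leaves code and metadata in production kernels; pr_devel() compiles to nothing unless the file itself defines DEBUG, which is what you want on a path as hot as get_user_pages_fast(). A minimal sketch of the distinction, using the real macro names but a hypothetical demo function (the exact header-internal definitions can vary by kernel version):

	/* Compile with -DDEBUG (or "#define DEBUG" before the includes)
	 * to get output from pr_devel(); without it the call vanishes at
	 * compile time, unlike pr_debug() under CONFIG_DYNAMIC_DEBUG. */
	#define DEBUG
	#include <linux/kernel.h>

	static void gup_trace_demo(unsigned long start, int nr_pages, int write)
	{
		pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages,
			 write ? "write" : "read");
	}
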
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9920d6a7cf29..c46ef2ffa3d9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
 	pmd = pmd_offset(pud, start);
 	pud_clear(pud);
-	pmd_free_tlb(tlb, pmd);
+	pmd_free_tlb(tlb, pmd, start);
 }
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
 	pud = pud_offset(pgd, start);
 	pgd_clear(pgd);
-	pud_free_tlb(tlb, pud);
+	pud_free_tlb(tlb, pud, start);
 }
 
 /*
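
Note: these two hunks track a tree-wide interface change rather than anything hugetlb-specific: pmd_free_tlb() and pud_free_tlb() now take the virtual address of the range being torn down as a third argument, so architectures can use it when batching TLB invalidation. A hedged sketch of the updated calling convention (demo_free_pmd() and the include choices are hypothetical; the *_free_tlb() names and argument order are taken from the hunks above):

	#include <asm/pgalloc.h>	/* page-table alloc/free helpers */
	#include <asm/tlb.h>		/* struct mmu_gather */

	/* Teardown order stays the same: unhook the PMD page from the
	 * PUD first, then hand it to the mmu_gather batch together with
	 * the virtual address it used to map. */
	static void demo_free_pmd(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long start)
	{
		pmd_t *pmd = pmd_offset(pud, start);

		pud_clear(pud);
		pmd_free_tlb(tlb, pmd, start);	/* new: address as third argument */
	}
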
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986809c0..b1a727def15b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
 			id = first_context;
 			continue;
 		}
-		pr_debug("[%d] steal context %d from mm @%p\n",
+		pr_devel("[%d] steal context %d from mm @%p\n",
 			 smp_processor_id(), id, mm);
 
 		/* Mark this mm has having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
 	/* Pick up the victim mm */
 	mm = context_mm[id];
 
-	pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
 
 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	spin_lock(&context_lock);
 
 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
 		 cpu, next, next->context.active, next->context.id);
 #endif
 
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.active++;
 	if (prev) {
 #ifndef DEBUG_STEAL_ONLY
-		pr_debug(" old context %p active was: %d\n",
+		pr_devel(" old context %p active was: %d\n",
 			 prev, prev->context.active);
 #endif
 		WARN_ON(prev->context.active < 1);
@@ -217,6 +217,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 			id = steal_context_smp(id);
 			if (id == MMU_NO_CONTEXT)
 				goto again;
+			goto stolen;
 		}
 #endif /* CONFIG_SMP */
 		id = steal_context_up(id);
@@ -236,7 +237,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.id = id;
 
 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
 		 cpu, id, nr_free_contexts);
 #endif
 
@@ -247,7 +248,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 * local TLB for it and unmark it before we use it
 	 */
 	if (test_bit(id, stale_map[cpu])) {
-		pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
 			 cpu, id, next);
 		local_flush_tlb_mm(next);
 
@@ -314,13 +315,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
 		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
 		kfree(stale_map[cpu]);
 		stale_map[cpu] = NULL;
 		break;
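
Note: besides the pr_debug() to pr_devel() conversion, this file picks up one behavioural fix, the added "goto stolen;" in the hunk at -217. Without it, when the free list was empty and steal_context_smp() succeeded on SMP, execution fell out of the #ifdef block and ran steal_context_up() as well, stealing a second context and discarding the id just obtained. A condensed sketch of the corrected flow, reconstructed from the hunk (label and helper names are the real ones; the trailing goto after steal_context_up() is assumed from the label's existence and is not visible in the hunk):

	/* No free context: steal one.  On SMP, a successful steal must
	 * skip the UP fallback below, hence the added goto. */
	if (nr_free_contexts == 0) {
	#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;	/* lost a race, retry */
			goto stolen;		/* fix: don't steal twice */
		}
	#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;			/* assumed: pre-existing path */
	}
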
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ae1d67cc090c..627767d6169b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -129,12 +129,12 @@ static pte_t do_dcache_icache_coherency(pte_t pte)
 	page = pfn_to_page(pfn);
 
 	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
-		pr_debug("do_dcache_icache_coherency... flushing\n");
+		pr_devel("do_dcache_icache_coherency... flushing\n");
 		flush_dcache_icache_page(page);
 		set_bit(PG_arch_1, &page->flags);
 	}
 	else
-		pr_debug("do_dcache_icache_coherency... already clean\n");
+		pr_devel("do_dcache_icache_coherency... already clean\n");
 	return __pte(pte_val(pte) | _PAGE_HWEXEC);
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 3b52c80e5e33..5b7038f248b6 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,8 +14,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#undef DEBUG
-
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -27,11 +25,6 @@
 #include <linux/compiler.h>
 #include <asm/udbg.h>
 
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG pr_debug
-#endif
 
 extern void slb_allocate_realmode(unsigned long ea);
 extern void slb_allocate_user(unsigned long ea);
@@ -285,13 +278,13 @@ void slb_initialize(void)
 		patch_slb_encoding(slb_compare_rr_to_size,
 				   mmu_slb_size);
 
-		DBG("SLB: linear LLP = %04lx\n", linear_llp);
-		DBG("SLB: io LLP = %04lx\n", io_llp);
+		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
+		pr_devel("SLB: io LLP = %04lx\n", io_llp);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
 				   SLB_VSID_KERNEL | vmemmap_llp);
-		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
 #endif
 	}
 
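
Note: slb.c previously carried a file-local DBG() wrapper (visible in the deleted hunk above) plus the "#undef DEBUG" boilerplate at the top of the file; with DEBUG undefined, DBG() was just pr_debug() by another name. Calling pr_devel() directly gives the same per-file compile-time gating with no private macro to maintain. A small before/after sketch (slb_report_demo() is hypothetical; the macro definitions are copied from the deleted hunk):

	#include <linux/kernel.h>

	/* Before: per-file indirection, one more name to grep for.
	 *   #ifdef DEBUG
	 *   #define DBG(fmt...) printk(fmt)
	 *   #else
	 *   #define DBG pr_debug
	 *   #endif
	 *
	 * After: the common helper already keys off a per-file DEBUG
	 * define, so the call site uses it directly. */
	static void slb_report_demo(unsigned long linear_llp)
	{
		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
	}
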
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1be1b5e59796..937eb90677d9 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -72,7 +72,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
-		psize = get_slice_psize(mm, addr);;
+		psize = get_slice_psize(mm, addr);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */