author     Michael Ellerman <michael@ellerman.id.au>          2009-06-17 14:13:54 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-07-07 23:50:22 -0400
commit     a1ac38ab98e8a79ce225347b725f3b9751c70f1e (patch)
tree       0213205a85e173d5228f2e6de5e629de69f92afd /arch/powerpc
parent     b69e9e931d2c6116921fdb260684e0546285c3c8 (diff)
powerpc: Use pr_devel() in arch/powerpc/mm/mmu_context_nohash.c
pr_debug() can now result in code being generated even when DEBUG
is not defined. That's not really desirable in some places.

With CONFIG_DYNAMIC_DEBUG=y:

size before:
   text    data     bss     dec     hex  filename
   1508      48      28    1584     630  powerpc/mm/mmu_context_nohash.o

size after:
   text    data     bss     dec     hex  filename
   1088       0      28    1116     45c  powerpc/mm/mmu_context_nohash.o

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
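The saving comes from how the two macros expand: with CONFIG_DYNAMIC_DEBUG=y,
pr_debug() always emits a call site (plus call-site metadata) so the dynamic
debug machinery can enable it at runtime, while pr_devel() compiles away
completely unless DEBUG is defined for the file. A minimal userspace sketch of
that behaviour follows; the names my_pr_debug, my_pr_devel, dyn_debug_hook,
DYN_DEBUG and DEVEL_DEBUG are invented for this sketch and only approximate
the real printk macros.

/* Toy model only -- the real pr_debug()/pr_devel() definitions live in the
 * kernel's printk headers; the names below are invented for illustration. */
#include <stdio.h>

#ifdef DYN_DEBUG                        /* models CONFIG_DYNAMIC_DEBUG=y */
/* Stand-in for the dynamic-debug machinery: a call site is generated for
 * every my_pr_debug() even though nothing prints unless enabled later. */
static void dyn_debug_hook(const char *fmt, ...) { (void)fmt; }
#define my_pr_debug(...) dyn_debug_hook(__VA_ARGS__)
#else
#define my_pr_debug(...) do { } while (0)
#endif

#ifdef DEVEL_DEBUG                      /* models a per-file "#define DEBUG" */
#define my_pr_devel(...) printf(__VA_ARGS__)
#else
#define my_pr_devel(...) do { } while (0)   /* compiles to nothing */
#endif

int main(void)
{
	/* With -DDYN_DEBUG the first call still costs text size even though
	 * it never prints; the my_pr_devel() call costs nothing unless
	 * -DDEVEL_DEBUG is also given. */
	my_pr_debug("[%d] steal context %d\n", 0, 1);
	my_pr_devel("[%d] steal context %d\n", 0, 1);
	return 0;
}

Comparing `size` output for builds with and without -DDYN_DEBUG mirrors, at
toy scale, the before/after numbers above.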
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986809c0..92a197117d5b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
 				id = first_context;
 			continue;
 		}
-		pr_debug("[%d] steal context %d from mm @%p\n",
+		pr_devel("[%d] steal context %d from mm @%p\n",
 			 smp_processor_id(), id, mm);
 
 		/* Mark this mm has having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
 	/* Pick up the victim mm */
 	mm = context_mm[id];
 
-	pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
 
 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	spin_lock(&context_lock);
 
 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
 		cpu, next, next->context.active, next->context.id);
 #endif
 
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.active++;
 	if (prev) {
 #ifndef DEBUG_STEAL_ONLY
-		pr_debug(" old context %p active was: %d\n",
+		pr_devel(" old context %p active was: %d\n",
 			 prev, prev->context.active);
 #endif
 		WARN_ON(prev->context.active < 1);
@@ -236,7 +236,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.id = id;
 
 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
 		 cpu, id, nr_free_contexts);
 #endif
 
@@ -247,7 +247,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 * local TLB for it and unmark it before we use it
 	 */
 	if (test_bit(id, stale_map[cpu])) {
-		pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
 			 cpu, id, next);
 		local_flush_tlb_mm(next);
 
@@ -314,13 +314,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
 		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
 		kfree(stale_map[cpu]);
 		stale_map[cpu] = NULL;
 		break;
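A practical aside (a sketch of common kernel convention, not something this
commit adds): after this change the messages above are only compiled in when
DEBUG is defined for arch/powerpc/mm/mmu_context_nohash.c, and the dynamic
debug knobs no longer affect them. The usual ways to turn them back on are a
define before the includes or a per-object flag in the directory Makefile:

	/* at the top of arch/powerpc/mm/mmu_context_nohash.c, before any #include */
	#define DEBUG

or

	# in arch/powerpc/mm/Makefile (illustrative)
	CFLAGS_mmu_context_nohash.o += -DDEBUG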