path: root/kernel
author	Pavel Emelyanov <xemul@openvz.org>	2007-10-19 02:40:40 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-19 14:53:43 -0400
commit	ba25f9dcc4ea6e30839fcab5a5516f2176d5bfed (patch)
tree	3123c03b25dd5c0cd24b6ab4fc16731217838157 /kernel
parent	9a2e70572e94e21e7ec4186702d045415422bda0 (diff)
Use helpers to obtain task pid in printks
The task_struct->pid member is going to be deprecated, so start using the
helpers (task_pid_nr/task_pid_vnr/task_pid_nr_ns) in the kernel.

The first thing to start with is the pid, printed to dmesg - in this case
we may safely use task_pid_nr(). Besides, printks produce more (much more)
than a half of all the explicit pid usage.

[akpm@linux-foundation.org: git-drm went and changed lots of stuff]
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Cc: Dave Airlie <airlied@linux.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
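A minimal sketch of the substitution applied at every call site below; the
wrapper function and message text are illustrative only, not taken from the
patch:

#include <linux/kernel.h>	/* printk() */
#include <linux/sched.h>	/* struct task_struct, task_pid_nr() */

/* Hypothetical helper: print a task the way the patched call sites do.
 * task_pid_nr() returns the global pid number, which is what a message
 * destined for dmesg should show; task_pid_vnr()/task_pid_nr_ns() are the
 * namespace-relative variants named in the changelog.
 */
static void report_task(struct task_struct *p)
{
	/* old, soon-to-be-deprecated form:
	 *	printk(KERN_INFO "task %s/%d\n", p->comm, p->pid);
	 */
	printk(KERN_INFO "task %s/%d\n", p->comm, task_pid_nr(p));
}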
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c	3
-rw-r--r--	kernel/exit.c	2
-rw-r--r--	kernel/lockdep.c	22
-rw-r--r--	kernel/rtmutex-debug.c	15
-rw-r--r--	kernel/rtmutex.c	2
-rw-r--r--	kernel/sched.c	7
-rw-r--r--	kernel/signal.c	2
-rw-r--r--	kernel/softlockup.c	2
-rw-r--r--	kernel/workqueue.c	2
9 files changed, 31 insertions(+), 26 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a21f71af9d81..ebf6647a2bd4 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -98,7 +98,8 @@ static inline void check_for_tasks(int cpu)
 		    !cputime_eq(p->stime, cputime_zero)))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
 				(state = %ld, flags = %x) \n",
-				 p->comm, p->pid, cpu, p->state, p->flags);
+				 p->comm, task_pid_nr(p), cpu,
+				 p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
diff --git a/kernel/exit.c b/kernel/exit.c
index 6838d4d77e05..7dab2defec63 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -959,7 +959,7 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
-				current->comm, current->pid,
+				current->comm, task_pid_nr(current),
 				preempt_count());
 
 	acct_update_integrals(tsk);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b5392ff7e6a6..55fe0c7cd95f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -511,11 +511,11 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	int i, depth = curr->lockdep_depth;
 
 	if (!depth) {
-		printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
+		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
 		return;
 	}
 	printk("%d lock%s held by %s/%d:\n",
-		depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
+		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 
 	for (i = 0; i < depth; i++) {
 		printk(" #%d: ", i);
@@ -904,7 +904,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	print_kernel_version();
 	printk( "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lock(check_source);
 	printk("\nbut task is already holding lock:\n");
 	print_lock(check_target);
@@ -1085,7 +1085,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_kernel_version();
 	printk( "------------------------------------------------------\n");
 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
-		curr->comm, curr->pid,
+		curr->comm, task_pid_nr(curr),
 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
 		curr->hardirqs_enabled,
@@ -1237,7 +1237,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	print_kernel_version();
 	printk( "---------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lock(next);
 	printk("\nbut task is already holding lock:\n");
 	print_lock(prev);
@@ -1641,7 +1641,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		usage_str[prev_bit], usage_str[new_bit]);
 
 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
-		curr->comm, curr->pid,
+		curr->comm, task_pid_nr(curr),
 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
 		trace_hardirqs_enabled(curr),
@@ -1694,7 +1694,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 	print_kernel_version();
 	printk( "---------------------------------------------------------\n");
 	printk("%s/%d just changed the state of lock:\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
 		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
@@ -2487,7 +2487,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk( "[ BUG: bad unlock balance detected! ]\n");
 	printk( "-------------------------------------\n");
 	printk("%s/%d is trying to release lock (",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
 	printk(") at:\n");
 	print_ip_sym(ip);
@@ -2737,7 +2737,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk( "[ BUG: bad contention detected! ]\n");
 	printk( "---------------------------------\n");
 	printk("%s/%d is trying to contend lock (",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
 	printk(") at:\n");
 	print_ip_sym(ip);
@@ -3072,7 +3072,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	printk( "[ BUG: held lock freed! ]\n");
 	printk( "-------------------------\n");
 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
-		curr->comm, curr->pid, mem_from, mem_to-1);
+		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
 	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
@@ -3125,7 +3125,7 @@ static void print_held_locks_bug(struct task_struct *curr)
 	printk( "[ BUG: lock held at task exit time! ]\n");
 	printk( "-------------------------------------\n");
 	printk("%s/%d is exiting with locks still held!\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	lockdep_print_held_locks(curr);
 
 	printk("\nstack backtrace:\n");
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 6b0703db152d..56d73cb8826d 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -87,7 +87,7 @@ static int rt_trace_on = 1;
 static void printk_task(struct task_struct *p)
 {
 	if (p)
-		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+		printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
 	else
 		printk("<none>");
 }
@@ -152,22 +152,25 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 	printk( "[ BUG: circular locking deadlock detected! ]\n");
 	printk( "--------------------------------------------\n");
 	printk("%s/%d is deadlocking current task %s/%d\n\n",
-	       task->comm, task->pid, current->comm, current->pid);
+	       task->comm, task_pid_nr(task),
+	       current->comm, task_pid_nr(current));
 
 	printk("\n1) %s/%d is trying to acquire this lock:\n",
-	       current->comm, current->pid);
+	       current->comm, task_pid_nr(current));
 	printk_lock(waiter->lock, 1);
 
-	printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+	printk("\n2) %s/%d is blocked on this lock:\n",
+		task->comm, task_pid_nr(task));
 	printk_lock(waiter->deadlock_lock, 1);
 
 	debug_show_held_locks(current);
 	debug_show_held_locks(task);
 
-	printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+	printk("\n%s/%d's [blocked] stackdump:\n\n",
+		task->comm, task_pid_nr(task));
 	show_stack(task, NULL);
 	printk("\n%s/%d's [current] stackdump:\n\n",
-	       current->comm, current->pid);
+	       current->comm, task_pid_nr(current));
 	dump_stack();
 	debug_show_all_locks();
 
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 8cd9bd2cdb34..0deef71ff8d2 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -185,7 +185,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		prev_max = max_lock_depth;
 		printk(KERN_WARNING "Maximum lock depth %d reached "
 		       "task: %s (%d)\n", max_lock_depth,
-		       top_task->comm, top_task->pid);
+		       top_task->comm, task_pid_nr(top_task));
 	}
 	put_task_struct(task);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d458504e3a6..a7e30462600f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3502,7 +3502,7 @@ EXPORT_SYMBOL(sub_preempt_count);
 static noinline void __schedule_bug(struct task_struct *prev)
 {
 	printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
-		prev->comm, preempt_count(), prev->pid);
+		prev->comm, preempt_count(), task_pid_nr(prev));
 	debug_show_held_locks(prev);
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
@@ -4865,7 +4865,8 @@ static void show_task(struct task_struct *p)
 		free = (unsigned long)n - (unsigned long)end_of_stack(p);
 	}
 #endif
-	printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+	printk(KERN_CONT "%5lu %5d %6d\n", free,
+		task_pid_nr(p), task_pid_nr(p->parent));
 
 	if (state != TASK_RUNNING)
 		show_stack(p, NULL);
@@ -5172,7 +5173,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 		if (p->mm && printk_ratelimit())
 			printk(KERN_INFO "process %d (%s) no "
 					"longer affine to cpu%d\n",
-					p->pid, p->comm, dead_cpu);
+					task_pid_nr(p), p->comm, dead_cpu);
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index 08364e75bb58..12006308c7eb 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -730,7 +730,7 @@ int print_fatal_signals;
 static void print_fatal_signal(struct pt_regs *regs, int signr)
 {
 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
-		current->comm, current->pid, signr);
+		current->comm, task_pid_nr(current), signr);
 
 #ifdef __i386__
 	printk("code at %08lx: ", regs->eip);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index edeeef3a6a32..11df812263c8 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -113,7 +113,7 @@ void softlockup_tick(void)
 	spin_lock(&print_lock);
 	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
 			this_cpu, now - touch_timestamp,
-			current->comm, current->pid);
+			current->comm, task_pid_nr(current));
 	if (regs)
 		show_regs(regs);
 	else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d1916fea7108..52d5e7c9a8e6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,7 +282,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
 					"%s/0x%08x/%d\n",
 					current->comm, preempt_count(),
-					current->pid);
+					task_pid_nr(current));
 			printk(KERN_ERR " last function: ");
 			print_symbol("%s\n", (unsigned long)f);
 			debug_show_held_locks(current);