author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-03-09 21:49:53 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-10 05:20:30 -0400
commit	bbcd3063597a3824357cd83c501c2a2aa21ef37b (patch)
tree	4da63cbe8e3740fd9645fbbf6aeeed5d21ac6437 /kernel/trace/trace_workqueue.c
parent	8293dd6f86e759068ce918aa10ca9c5d6d711cd0 (diff)
tracing: Don't assume the possible cpu list has contiguous numbers
"for (++cpu ; cpu < num_possible_cpus(); cpu++)" statement assumes possible cpus have continuous number - but that's a wrong assumption. Insted, cpumask_next() should be used. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Lai Jiangshan <laijs@cn.fujitsu.com> Cc: Steven Rostedt <srostedt@redhat.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> LKML-Reference: <20090310104437.A480.A69D9226@jp.fujitsu.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_workqueue.c')
-rw-r--r--	kernel/trace/trace_workqueue.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 46c8dc896bd3..739fdacf873b 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -91,7 +91,7 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 	struct cpu_workqueue_stats *cws;
 	unsigned long flags;
 
-	WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+	WARN_ON(cpu < 0);
 
 	/* Workqueues are sometimes created in atomic context */
 	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
@@ -175,12 +175,12 @@ static void *workqueue_stat_next(void *prev, int idx)
 	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
 		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-		for (++cpu ; cpu < num_possible_cpus(); cpu++) {
-			ret = workqueue_stat_start_cpu(cpu);
-			if (ret)
-				return ret;
-		}
-		return NULL;
+		do {
+			cpu = cpumask_next(cpu, cpu_possible_mask);
+			if (cpu >= nr_cpu_ids)
+				return NULL;
+		} while (!(ret = workqueue_stat_start_cpu(cpu)));
+		return ret;
 	}
 	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
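The bug the commit message describes is easy to demonstrate: possible CPU IDs can be sparse (CPU hotplug, offline packages), so a counting loop bounded by the number of possible CPUs both visits IDs that are not possible and skips real ones past the count. What follows is a minimal userspace sketch of the broken and fixed patterns; the 64-bit possible_mask, NR_CPU_IDS constant, and next_possible() helper are toy stand-ins for the kernel's cpu_possible_mask, nr_cpu_ids, and cpumask_next(), not kernel API.

#include <stdint.h>
#include <stdio.h>

#define NR_CPU_IDS 64

/* Sparse possible set: CPUs 0, 1, 4 and 5 are possible; 2 and 3 are not. */
static const uint64_t possible_mask = 0x33;

/* Toy analogue of cpumask_next(): first possible cpu strictly after 'cpu',
 * or NR_CPU_IDS when the mask is exhausted. */
static int next_possible(int cpu)
{
	for (int n = cpu + 1; n < NR_CPU_IDS; n++)
		if (possible_mask & (1ULL << n))
			return n;
	return NR_CPU_IDS;
}

int main(void)
{
	int cpu;

	/* Buggy pattern: using the *count* of possible CPUs (4 here) as an
	 * upper bound visits IDs 0..3, touching impossible CPUs 2 and 3
	 * and never reaching possible CPUs 4 and 5.
	 * (__builtin_popcountll is a GCC/Clang builtin.) */
	int num_possible = __builtin_popcountll(possible_mask);
	for (cpu = 0; cpu < num_possible; cpu++)
		printf("count-based loop visits cpu %d\n", cpu);

	/* Fixed pattern: walk the mask itself, as the patch does with
	 * cpumask_next(); this visits exactly 0, 1, 4, 5. */
	for (cpu = next_possible(-1); cpu < NR_CPU_IDS; cpu = next_possible(cpu))
		printf("mask-based loop visits cpu %d\n", cpu);

	return 0;
}

In kernel code the idiomatic spelling of the mask walk is the for_each_possible_cpu(cpu) helper; the patch uses an open-coded do/while instead because it must also keep advancing past CPUs whose stat list is empty, i.e. while workqueue_stat_start_cpu() returns NULL.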