Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace.h             |   1
-rw-r--r--  kernel/trace/trace_hw_branches.c | 173
-rw-r--r--  kernel/trace/trace_workqueue.c   |  64
3 files changed, 170 insertions, 68 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54b72781e920..b96037d970df 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,7 +438,6 @@ void trace_function(struct trace_array *tr,
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index df21c1e72b95..fff3545fc866 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,7 +1,8 @@
 /*
  * h/w branch tracer for x86 based on bts
  *
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
+ * Copyright (C) 2008-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
  *
  */
 
@@ -10,6 +11,9 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/kallsyms.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
 
 #include <asm/ds.h>
 
@@ -19,13 +23,32 @@
 
 #define SIZEOF_BTS (1 << 13)
 
+/* The tracer mutex protects the below per-cpu tracer array.
+   It needs to be held to:
+   - start tracing on all cpus
+   - stop tracing on all cpus
+   - start tracing on a single hotplug cpu
+   - stop tracing on a single hotplug cpu
+   - read the trace from all cpus
+   - read the trace from a single cpu
+*/
+static DEFINE_MUTEX(bts_tracer_mutex);
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
 static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 
 #define this_tracer per_cpu(tracer, smp_processor_id())
 #define this_buffer per_cpu(buffer, smp_processor_id())
 
+static int __read_mostly trace_hw_branches_enabled;
+static struct trace_array *hw_branch_trace __read_mostly;
 
+
+/*
+ * Start tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_mutex must be locked.
+ */
 static void bts_trace_start_cpu(void *arg)
 {
 	if (this_tracer)
@@ -43,14 +66,20 @@ static void bts_trace_start_cpu(void *arg)
 
 static void bts_trace_start(struct trace_array *tr)
 {
-	int cpu;
+	mutex_lock(&bts_tracer_mutex);
 
-	tracing_reset_online_cpus(tr);
+	on_each_cpu(bts_trace_start_cpu, NULL, 1);
+	trace_hw_branches_enabled = 1;
 
-	for_each_cpu(cpu, cpu_possible_mask)
-		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+	mutex_unlock(&bts_tracer_mutex);
 }
 
+/*
+ * Stop tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_mutex must be locked.
+ */
 static void bts_trace_stop_cpu(void *arg)
 {
 	if (this_tracer) {
@@ -61,26 +90,63 @@ static void bts_trace_stop_cpu(void *arg)
 
 static void bts_trace_stop(struct trace_array *tr)
 {
-	int cpu;
+	mutex_lock(&bts_tracer_mutex);
+
+	trace_hw_branches_enabled = 0;
+	on_each_cpu(bts_trace_stop_cpu, NULL, 1);
 
-	for_each_cpu(cpu, cpu_possible_mask)
+	mutex_unlock(&bts_tracer_mutex);
+}
+
+static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	mutex_lock(&bts_tracer_mutex);
+
+	if (!trace_hw_branches_enabled)
+		goto out;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+		break;
+	case CPU_DOWN_PREPARE:
 		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+		break;
+	}
+
+ out:
+	mutex_unlock(&bts_tracer_mutex);
+	return NOTIFY_DONE;
 }
 
+static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
+	.notifier_call = bts_hotcpu_handler
+};
+
 static int bts_trace_init(struct trace_array *tr)
 {
+	hw_branch_trace = tr;
+
+	register_hotcpu_notifier(&bts_hotcpu_notifier);
 	tracing_reset_online_cpus(tr);
 	bts_trace_start(tr);
 
 	return 0;
 }
 
+static void bts_trace_reset(struct trace_array *tr)
+{
+	bts_trace_stop(tr);
+	unregister_hotcpu_notifier(&bts_hotcpu_notifier);
+}
+
 static void bts_trace_print_header(struct seq_file *m)
 {
-	seq_puts(m,
-		 "# CPU# FROM TO FUNCTION\n");
-	seq_puts(m,
-		 "# | | | |\n");
+	seq_puts(m, "# CPU# TO <- FROM\n");
 }
 
 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
@@ -88,15 +154,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *seq = &iter->seq;
 	struct hw_branch_entry *it;
+	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 
 	trace_assign_type(it, entry);
 
 	if (entry->type == TRACE_HW_BRANCHES) {
 		if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
-		    trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
-				     it->from, it->to) &&
-		    (!it->from ||
-		     seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
+		    seq_print_ip_sym(seq, it->to, symflags) &&
+		    trace_seq_printf(seq, "\t <- ") &&
+		    seq_print_ip_sym(seq, it->from, symflags) &&
 		    trace_seq_printf(seq, "\n"))
 			return TRACE_TYPE_HANDLED;
 		return TRACE_TYPE_PARTIAL_LINE;;
@@ -104,26 +170,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 	return TRACE_TYPE_UNHANDLED;
 }
 
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
+void trace_hw_branch(u64 from, u64 to)
 {
+	struct trace_array *tr = hw_branch_trace;
 	struct ring_buffer_event *event;
 	struct hw_branch_entry *entry;
-	unsigned long irq;
+	unsigned long irq1, irq2;
+	int cpu;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
-	if (!event)
+	if (unlikely(!tr))
 		return;
+
+	if (unlikely(!trace_hw_branches_enabled))
+		return;
+
+	local_irq_save(irq1);
+	cpu = raw_smp_processor_id();
+	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+		goto out;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+	if (!event)
+		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, from);
 	entry->ent.type = TRACE_HW_BRANCHES;
-	entry->ent.cpu = smp_processor_id();
+	entry->ent.cpu = cpu;
 	entry->from = from;
 	entry->to = to;
-	ring_buffer_unlock_commit(tr->buffer, event, irq);
+	ring_buffer_unlock_commit(tr->buffer, event, irq2);
+
+ out:
+	atomic_dec(&tr->data[cpu]->disabled);
+	local_irq_restore(irq1);
 }
 
-static void trace_bts_at(struct trace_array *tr,
-			 const struct bts_trace *trace, void *at)
+static void trace_bts_at(const struct bts_trace *trace, void *at)
 {
 	struct bts_struct bts;
 	int err = 0;
@@ -138,18 +220,29 @@ static void trace_bts_at(struct trace_array *tr,
 
 	switch (bts.qualifier) {
 	case BTS_BRANCH:
-		trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
+		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
 		break;
 	}
 }
 
+/*
+ * Collect the trace on the current cpu and write it into the ftrace buffer.
+ *
+ * pre: bts_tracer_mutex must be locked
+ */
 static void trace_bts_cpu(void *arg)
 {
 	struct trace_array *tr = (struct trace_array *) arg;
 	const struct bts_trace *trace;
 	unsigned char *at;
 
-	if (!this_tracer)
+	if (unlikely(!tr))
+		return;
+
+	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
+		return;
+
+	if (unlikely(!this_tracer))
 		return;
 
 	ds_suspend_bts(this_tracer);
@@ -159,11 +252,11 @@ static void trace_bts_cpu(void *arg)
 
 	for (at = trace->ds.top; (void *)at < trace->ds.end;
 	     at += trace->ds.size)
-		trace_bts_at(tr, trace, at);
+		trace_bts_at(trace, at);
 
 	for (at = trace->ds.begin; (void *)at < trace->ds.top;
 	     at += trace->ds.size)
-		trace_bts_at(tr, trace, at);
+		trace_bts_at(trace, at);
 
 out:
 	ds_resume_bts(this_tracer);
@@ -171,22 +264,38 @@ out:
171 264
172static void trace_bts_prepare(struct trace_iterator *iter) 265static void trace_bts_prepare(struct trace_iterator *iter)
173{ 266{
174 int cpu; 267 mutex_lock(&bts_tracer_mutex);
268
269 on_each_cpu(trace_bts_cpu, iter->tr, 1);
270
271 mutex_unlock(&bts_tracer_mutex);
272}
273
274static void trace_bts_close(struct trace_iterator *iter)
275{
276 tracing_reset_online_cpus(iter->tr);
277}
278
279void trace_hw_branch_oops(void)
280{
281 mutex_lock(&bts_tracer_mutex);
282
283 trace_bts_cpu(hw_branch_trace);
175 284
176 for_each_cpu(cpu, cpu_possible_mask) 285 mutex_unlock(&bts_tracer_mutex);
177 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
178} 286}
179 287
180struct tracer bts_tracer __read_mostly = 288struct tracer bts_tracer __read_mostly =
181{ 289{
182 .name = "hw-branch-tracer", 290 .name = "hw-branch-tracer",
183 .init = bts_trace_init, 291 .init = bts_trace_init,
184 .reset = bts_trace_stop, 292 .reset = bts_trace_reset,
185 .print_header = bts_trace_print_header, 293 .print_header = bts_trace_print_header,
186 .print_line = bts_trace_print_line, 294 .print_line = bts_trace_print_line,
187 .start = bts_trace_start, 295 .start = bts_trace_start,
188 .stop = bts_trace_stop, 296 .stop = bts_trace_stop,
189 .open = trace_bts_prepare 297 .open = trace_bts_prepare,
298 .close = trace_bts_close
190}; 299};
191 300
192__init static int init_bts_trace(void) 301__init static int init_bts_trace(void)
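
The hunks above move the hw-branch tracer from ad-hoc smp_call_function_single() loops over all possible CPUs to a mutex-protected enable flag plus a CPU hotplug notifier, so a CPU that comes online while tracing is active gets armed and a CPU about to go offline gets disarmed first. The following stand-alone sketch (not part of the patch; the sample_* names are invented for illustration) shows that pattern in isolation, using the same 2.6.29-era interfaces the patch relies on (on_each_cpu, smp_call_function_single, register_hotcpu_notifier):

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/smp.h>

static DEFINE_MUTEX(sample_mutex);	/* guards sample_enabled and per-cpu state */
static int sample_enabled;

/* Runs on exactly one cpu (IPI context): arm the per-cpu facility there. */
static void sample_start_cpu(void *ignored)
{
}

/* Runs on exactly one cpu (IPI context): disarm the per-cpu facility there. */
static void sample_stop_cpu(void *ignored)
{
}

static int sample_hotcpu(struct notifier_block *nb, unsigned long action,
			 void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	mutex_lock(&sample_mutex);
	if (sample_enabled) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_DOWN_FAILED:
			/* a cpu (re)appeared while enabled: arm it */
			smp_call_function_single(cpu, sample_start_cpu, NULL, 1);
			break;
		case CPU_DOWN_PREPARE:
			/* cpu is about to go offline: disarm it first */
			smp_call_function_single(cpu, sample_stop_cpu, NULL, 1);
			break;
		}
	}
	mutex_unlock(&sample_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block sample_nb = {
	.notifier_call = sample_hotcpu,
};

static int __init sample_init(void)
{
	register_hotcpu_notifier(&sample_nb);
	mutex_lock(&sample_mutex);
	on_each_cpu(sample_start_cpu, NULL, 1);	/* arm every online cpu */
	sample_enabled = 1;
	mutex_unlock(&sample_mutex);
	return 0;
}

static void __exit sample_exit(void)
{
	mutex_lock(&sample_mutex);
	sample_enabled = 0;
	on_each_cpu(sample_stop_cpu, NULL, 1);
	mutex_unlock(&sample_mutex);
	unregister_hotcpu_notifier(&sample_nb);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");

As in the patch, the notifier is registered before the facility is switched on, the enable flag is cleared before the per-cpu state is torn down, and the mutex keeps a concurrent hotplug event from racing with the global start/stop paths.
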
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index f8118d39ca9b..4664990fe9c5 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -8,6 +8,7 @@
 
 #include <trace/workqueue.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include "trace_stat.h"
 #include "trace.h"
 
@@ -37,7 +38,8 @@ struct workqueue_global_stats {
 /* Don't need a global lock because allocated before the workqueues, and
  * never freed.
  */
-static struct workqueue_global_stats *all_workqueue_stat;
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
 
 /* Insertion of a work */
 static void
@@ -48,8 +50,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 			list) {
 		if (node->pid == wq_thread->pid) {
 			atomic_inc(&node->inserted);
@@ -58,7 +60,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
 	}
 	pr_debug("trace_workqueue: entry not found\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Execution of a work */
@@ -70,8 +72,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 			list) {
 		if (node->pid == wq_thread->pid) {
 			node->executed++;
@@ -80,7 +82,7 @@ probe_workqueue_execution(struct task_struct *wq_thread,
 	}
 	pr_debug("trace_workqueue: entry not found\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Creation of a cpu workqueue thread */
@@ -104,11 +106,11 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 
 	cws->pid = wq_thread->pid;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (list_empty(&all_workqueue_stat[cpu].list))
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (list_empty(&workqueue_cpu_stat(cpu)->list))
 		cws->first_entry = true;
-	list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Destruction of a cpu workqueue thread */
@@ -119,8 +121,8 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 			list) {
 		if (node->pid == wq_thread->pid) {
 			list_del(&node->list);
@@ -131,7 +133,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 
 	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 }
 
@@ -141,13 +143,13 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
 	struct cpu_workqueue_stats *ret = NULL;
 
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 
-	if (!list_empty(&all_workqueue_stat[cpu].list))
-		ret = list_entry(all_workqueue_stat[cpu].list.next,
+	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
 				 struct cpu_workqueue_stats, list);
 
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 	return ret;
 }
@@ -172,9 +174,9 @@ static void *workqueue_stat_next(void *prev, int idx)
 	unsigned long flags;
 	void *ret = NULL;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
-		spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 		for (++cpu ; cpu < num_possible_cpus(); cpu++) {
 			ret = workqueue_stat_start_cpu(cpu);
 			if (ret)
@@ -182,7 +184,7 @@ static void *workqueue_stat_next(void *prev, int idx)
 		}
 		return NULL;
 	}
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
 			  list);
@@ -199,10 +201,10 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
 		   cws->executed,
 		   trace_find_cmdline(cws->pid));
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (&cws->list == all_workqueue_stat[cpu].list.next)
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
 		seq_printf(s, "\n");
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 	return 0;
 }
@@ -258,17 +260,9 @@ int __init trace_workqueue_early_init(void)
 	if (ret)
 		goto no_creation;
 
-	all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
-				     * num_possible_cpus(), GFP_KERNEL);
-
-	if (!all_workqueue_stat) {
-		pr_warning("trace_workqueue: not enough memory\n");
-		goto no_creation;
-	}
-
 	for_each_possible_cpu(cpu) {
-		spin_lock_init(&all_workqueue_stat[cpu].lock);
-		INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
 	}
 
 	return 0;
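
The trace_workqueue.c part of the patch replaces a kmalloc()'d array indexed by cpu number with statically defined per-cpu data and a small accessor macro, which is why the allocation-failure path disappears from trace_workqueue_early_init(). A minimal, stand-alone sketch of that conversion (not part of the patch; the sample_* names are invented, while DEFINE_PER_CPU(), per_cpu() and for_each_possible_cpu() are the real interfaces the patch uses):

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct sample_cpu_stats {
	spinlock_t		lock;
	struct list_head	list;
};

/* One instance per possible cpu, set up by the per-cpu infrastructure;
 * no runtime allocation and therefore no allocation-failure handling. */
static DEFINE_PER_CPU(struct sample_cpu_stats, sample_stats);
#define sample_cpu_stat(cpu)	(&per_cpu(sample_stats, cpu))

static int __init sample_stats_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&sample_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&sample_cpu_stat(cpu)->list);
	}
	return 0;
}
early_initcall(sample_stats_init);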