Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 |  21
-rw-r--r--  kernel/trace/Makefile                |   1
-rw-r--r--  kernel/trace/kmemtrace.c             | 350
-rw-r--r--  kernel/trace/ring_buffer.c           |  78
-rw-r--r--  kernel/trace/trace.c                 |  73
-rw-r--r--  kernel/trace/trace.h                 |  27
-rw-r--r--  kernel/trace/trace_boot.c            |   2
-rw-r--r--  kernel/trace/trace_functions_graph.c |   2
-rw-r--r--  kernel/trace/trace_hw_branches.c     |   6
-rw-r--r--  kernel/trace/trace_power.c           |   2
-rw-r--r--  kernel/trace/trace_sysprof.c         |  14
11 files changed, 512 insertions, 64 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..1c0b7504cab3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -264,6 +264,27 @@ config HW_BRANCH_TRACER | |||
264 | This tracer records all branches on the system in a circular | 264 | This tracer records all branches on the system in a circular |
265 | buffer giving access to the last N branches for each cpu. | 265 | buffer giving access to the last N branches for each cpu. |
266 | 266 | ||
267 | config KMEMTRACE | ||
268 | bool "Trace SLAB allocations" | ||
269 | select TRACING | ||
270 | help | ||
271 | kmemtrace provides tracing for slab allocator functions, such as | ||
272 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected | ||
273 | data is then fed to the userspace application in order to analyse | ||
274 | allocation hotspots, internal fragmentation and so on, making it | ||
275 | possible to see how well an allocator performs, as well as debug | ||
276 | and profile kernel code. | ||
277 | |||
278 | This requires a userspace application to use. See | ||
279 | Documentation/vm/kmemtrace.txt for more information. | ||
280 | |||
281 | Saying Y will make the kernel somewhat larger and slower. However, | ||
282 | if you disable kmemtrace at run-time or boot-time, the performance | ||
283 | impact is minimal (depending on the arch the kernel is built for). | ||
284 | |||
285 | If unsure, say N. | ||
286 | |||
287 | |||
267 | config DYNAMIC_FTRACE | 288 | config DYNAMIC_FTRACE |
268 | bool "enable/disable ftrace tracepoints dynamically" | 289 | bool "enable/disable ftrace tracepoints dynamically" |
269 | depends on FUNCTION_TRACER | 290 | depends on FUNCTION_TRACER |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 31cd5fbc0eed..05c9182061de 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -35,5 +35,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | |||
35 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 35 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
36 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o | 36 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o |
37 | obj-$(CONFIG_POWER_TRACER) += trace_power.o | 37 | obj-$(CONFIG_POWER_TRACER) += trace_power.o |
38 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | ||
38 | 39 | ||
39 | libftrace-y := ftrace.o | 40 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644
index 000000000000..faaa5ae7e75a
--- /dev/null
+++ b/kernel/trace/kmemtrace.c
@@ -0,0 +1,350 @@ | |||
1 | /* | ||
2 | * Memory allocator tracing | ||
3 | * | ||
4 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
5 | * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi> | ||
6 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/dcache.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <trace/kmemtrace.h> | ||
14 | |||
15 | #include "trace.h" | ||
16 | #include "trace_output.h" | ||
17 | |||
18 | /* Select an alternative, minimalistic output instead of the original one */ | ||
19 | #define TRACE_KMEM_OPT_MINIMAL 0x1 | ||
20 | |||
21 | static struct tracer_opt kmem_opts[] = { | ||
22 | /* Disable the minimalistic output by default */ | ||
23 | { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) }, | ||
24 | { } | ||
25 | }; | ||
26 | |||
27 | static struct tracer_flags kmem_tracer_flags = { | ||
28 | .val = 0, | ||
29 | .opts = kmem_opts | ||
30 | }; | ||
31 | |||
32 | |||
33 | static bool kmem_tracing_enabled __read_mostly; | ||
34 | static struct trace_array *kmemtrace_array; | ||
35 | |||
36 | static int kmem_trace_init(struct trace_array *tr) | ||
37 | { | ||
38 | int cpu; | ||
39 | kmemtrace_array = tr; | ||
40 | |||
41 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
42 | tracing_reset(tr, cpu); | ||
43 | |||
44 | kmem_tracing_enabled = true; | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static void kmem_trace_reset(struct trace_array *tr) | ||
50 | { | ||
51 | kmem_tracing_enabled = false; | ||
52 | } | ||
53 | |||
54 | static void kmemtrace_headers(struct seq_file *s) | ||
55 | { | ||
56 | /* Don't need headers for the original kmemtrace output */ | ||
57 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
58 | return; | ||
59 | |||
60 | seq_printf(s, "#\n"); | ||
61 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | ||
62 | " POINTER NODE CALLER\n"); | ||
63 | seq_printf(s, "# FREE | | | | " | ||
64 | " | | | |\n"); | ||
65 | seq_printf(s, "# |\n\n"); | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * The two following functions give the original output from kmemtrace, | ||
70 | * or something close to it. Perhaps a few details are still missing. | ||
71 | */ | ||
72 | static enum print_line_t | ||
73 | kmemtrace_print_alloc_original(struct trace_iterator *iter, | ||
74 | struct kmemtrace_alloc_entry *entry) | ||
75 | { | ||
76 | struct trace_seq *s = &iter->seq; | ||
77 | int ret; | ||
78 | |||
79 | /* Taken from the old linux/kmemtrace.h */ | ||
80 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu " | ||
81 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | ||
82 | entry->type_id, entry->call_site, (unsigned long) entry->ptr, | ||
83 | (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc, | ||
84 | (unsigned long) entry->gfp_flags, entry->node); | ||
85 | |||
86 | if (!ret) | ||
87 | return TRACE_TYPE_PARTIAL_LINE; | ||
88 | |||
89 | return TRACE_TYPE_HANDLED; | ||
90 | } | ||
91 | |||
92 | static enum print_line_t | ||
93 | kmemtrace_print_free_original(struct trace_iterator *iter, | ||
94 | struct kmemtrace_free_entry *entry) | ||
95 | { | ||
96 | struct trace_seq *s = &iter->seq; | ||
97 | int ret; | ||
98 | |||
99 | /* Taken from the old linux/kmemtrace.h */ | ||
100 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n", | ||
101 | entry->type_id, entry->call_site, (unsigned long) entry->ptr); | ||
102 | |||
103 | if (!ret) | ||
104 | return TRACE_TYPE_PARTIAL_LINE; | ||
105 | |||
106 | return TRACE_TYPE_HANDLED; | ||
107 | } | ||
108 | |||
109 | |||
110 | /* The following two functions provide a more minimalistic output */ | ||
111 | static enum print_line_t | ||
112 | kmemtrace_print_alloc_compress(struct trace_iterator *iter, | ||
113 | struct kmemtrace_alloc_entry *entry) | ||
114 | { | ||
115 | struct trace_seq *s = &iter->seq; | ||
116 | int ret; | ||
117 | |||
118 | /* Alloc entry */ | ||
119 | ret = trace_seq_printf(s, " + "); | ||
120 | if (!ret) | ||
121 | return TRACE_TYPE_PARTIAL_LINE; | ||
122 | |||
123 | /* Type */ | ||
124 | switch (entry->type_id) { | ||
125 | case KMEMTRACE_TYPE_KMALLOC: | ||
126 | ret = trace_seq_printf(s, "K "); | ||
127 | break; | ||
128 | case KMEMTRACE_TYPE_CACHE: | ||
129 | ret = trace_seq_printf(s, "C "); | ||
130 | break; | ||
131 | case KMEMTRACE_TYPE_PAGES: | ||
132 | ret = trace_seq_printf(s, "P "); | ||
133 | break; | ||
134 | default: | ||
135 | ret = trace_seq_printf(s, "? "); | ||
136 | } | ||
137 | |||
138 | if (!ret) | ||
139 | return TRACE_TYPE_PARTIAL_LINE; | ||
140 | |||
141 | /* Requested */ | ||
142 | ret = trace_seq_printf(s, "%4d ", entry->bytes_req); | ||
143 | if (!ret) | ||
144 | return TRACE_TYPE_PARTIAL_LINE; | ||
145 | |||
146 | /* Allocated */ | ||
147 | ret = trace_seq_printf(s, "%4d ", entry->bytes_alloc); | ||
148 | if (!ret) | ||
149 | return TRACE_TYPE_PARTIAL_LINE; | ||
150 | |||
151 | /* Flags | ||
152 | * TODO: it would be better to print the names of the GFP flags | ||
153 | */ | ||
154 | ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); | ||
155 | if (!ret) | ||
156 | return TRACE_TYPE_PARTIAL_LINE; | ||
157 | |||
158 | /* Pointer to allocated */ | ||
159 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
160 | if (!ret) | ||
161 | return TRACE_TYPE_PARTIAL_LINE; | ||
162 | |||
163 | /* Node */ | ||
164 | ret = trace_seq_printf(s, "%4d ", entry->node); | ||
165 | if (!ret) | ||
166 | return TRACE_TYPE_PARTIAL_LINE; | ||
167 | |||
168 | /* Call site */ | ||
169 | ret = seq_print_ip_sym(s, entry->call_site, 0); | ||
170 | if (!ret) | ||
171 | return TRACE_TYPE_PARTIAL_LINE; | ||
172 | |||
173 | if (!trace_seq_printf(s, "\n")) | ||
174 | return TRACE_TYPE_PARTIAL_LINE; | ||
175 | |||
176 | return TRACE_TYPE_HANDLED; | ||
177 | } | ||
178 | |||
179 | static enum print_line_t | ||
180 | kmemtrace_print_free_compress(struct trace_iterator *iter, | ||
181 | struct kmemtrace_free_entry *entry) | ||
182 | { | ||
183 | struct trace_seq *s = &iter->seq; | ||
184 | int ret; | ||
185 | |||
186 | /* Free entry */ | ||
187 | ret = trace_seq_printf(s, " - "); | ||
188 | if (!ret) | ||
189 | return TRACE_TYPE_PARTIAL_LINE; | ||
190 | |||
191 | /* Type */ | ||
192 | switch (entry->type_id) { | ||
193 | case KMEMTRACE_TYPE_KMALLOC: | ||
194 | ret = trace_seq_printf(s, "K "); | ||
195 | break; | ||
196 | case KMEMTRACE_TYPE_CACHE: | ||
197 | ret = trace_seq_printf(s, "C "); | ||
198 | break; | ||
199 | case KMEMTRACE_TYPE_PAGES: | ||
200 | ret = trace_seq_printf(s, "P "); | ||
201 | break; | ||
202 | default: | ||
203 | ret = trace_seq_printf(s, "? "); | ||
204 | } | ||
205 | |||
206 | if (!ret) | ||
207 | return TRACE_TYPE_PARTIAL_LINE; | ||
208 | |||
209 | /* Skip requested/allocated/flags */ | ||
210 | ret = trace_seq_printf(s, " "); | ||
211 | if (!ret) | ||
212 | return TRACE_TYPE_PARTIAL_LINE; | ||
213 | |||
214 | /* Pointer to allocated */ | ||
215 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
216 | if (!ret) | ||
217 | return TRACE_TYPE_PARTIAL_LINE; | ||
218 | |||
219 | /* Skip node */ | ||
220 | ret = trace_seq_printf(s, " "); | ||
221 | if (!ret) | ||
222 | return TRACE_TYPE_PARTIAL_LINE; | ||
223 | |||
224 | /* Call site */ | ||
225 | ret = seq_print_ip_sym(s, entry->call_site, 0); | ||
226 | if (!ret) | ||
227 | return TRACE_TYPE_PARTIAL_LINE; | ||
228 | |||
229 | if (!trace_seq_printf(s, "\n")) | ||
230 | return TRACE_TYPE_PARTIAL_LINE; | ||
231 | |||
232 | return TRACE_TYPE_HANDLED; | ||
233 | } | ||
234 | |||
235 | static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | ||
236 | { | ||
237 | struct trace_entry *entry = iter->ent; | ||
238 | |||
239 | switch (entry->type) { | ||
240 | case TRACE_KMEM_ALLOC: { | ||
241 | struct kmemtrace_alloc_entry *field; | ||
242 | trace_assign_type(field, entry); | ||
243 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | ||
244 | return kmemtrace_print_alloc_compress(iter, field); | ||
245 | else | ||
246 | return kmemtrace_print_alloc_original(iter, field); | ||
247 | } | ||
248 | |||
249 | case TRACE_KMEM_FREE: { | ||
250 | struct kmemtrace_free_entry *field; | ||
251 | trace_assign_type(field, entry); | ||
252 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | ||
253 | return kmemtrace_print_free_compress(iter, field); | ||
254 | else | ||
255 | return kmemtrace_print_free_original(iter, field); | ||
256 | } | ||
257 | |||
258 | default: | ||
259 | return TRACE_TYPE_UNHANDLED; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | /* Trace allocations */ | ||
264 | void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, | ||
265 | unsigned long call_site, | ||
266 | const void *ptr, | ||
267 | size_t bytes_req, | ||
268 | size_t bytes_alloc, | ||
269 | gfp_t gfp_flags, | ||
270 | int node) | ||
271 | { | ||
272 | struct ring_buffer_event *event; | ||
273 | struct kmemtrace_alloc_entry *entry; | ||
274 | struct trace_array *tr = kmemtrace_array; | ||
275 | unsigned long irq_flags; | ||
276 | |||
277 | if (!kmem_tracing_enabled) | ||
278 | return; | ||
279 | |||
280 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
281 | &irq_flags); | ||
282 | if (!event) | ||
283 | return; | ||
284 | entry = ring_buffer_event_data(event); | ||
285 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
286 | |||
287 | entry->ent.type = TRACE_KMEM_ALLOC; | ||
288 | entry->call_site = call_site; | ||
289 | entry->ptr = ptr; | ||
290 | entry->bytes_req = bytes_req; | ||
291 | entry->bytes_alloc = bytes_alloc; | ||
292 | entry->gfp_flags = gfp_flags; | ||
293 | entry->node = node; | ||
294 | |||
295 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
296 | |||
297 | trace_wake_up(); | ||
298 | } | ||
299 | EXPORT_SYMBOL(kmemtrace_mark_alloc_node); | ||
300 | |||
301 | void kmemtrace_mark_free(enum kmemtrace_type_id type_id, | ||
302 | unsigned long call_site, | ||
303 | const void *ptr) | ||
304 | { | ||
305 | struct ring_buffer_event *event; | ||
306 | struct kmemtrace_free_entry *entry; | ||
307 | struct trace_array *tr = kmemtrace_array; | ||
308 | unsigned long irq_flags; | ||
309 | |||
310 | if (!kmem_tracing_enabled) | ||
311 | return; | ||
312 | |||
313 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
314 | &irq_flags); | ||
315 | if (!event) | ||
316 | return; | ||
317 | entry = ring_buffer_event_data(event); | ||
318 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
319 | |||
320 | entry->ent.type = TRACE_KMEM_FREE; | ||
321 | entry->type_id = type_id; | ||
322 | entry->call_site = call_site; | ||
323 | entry->ptr = ptr; | ||
324 | |||
325 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
326 | |||
327 | trace_wake_up(); | ||
328 | } | ||
329 | EXPORT_SYMBOL(kmemtrace_mark_free); | ||
330 | |||
331 | static struct tracer kmem_tracer __read_mostly = { | ||
332 | .name = "kmemtrace", | ||
333 | .init = kmem_trace_init, | ||
334 | .reset = kmem_trace_reset, | ||
335 | .print_line = kmemtrace_print_line, | ||
336 | .print_header = kmemtrace_headers, | ||
337 | .flags = &kmem_tracer_flags | ||
338 | }; | ||
339 | |||
340 | void kmemtrace_init(void) | ||
341 | { | ||
342 | /* earliest opportunity to start kmem tracing */ | ||
343 | } | ||
344 | |||
345 | static int __init init_kmem_tracer(void) | ||
346 | { | ||
347 | return register_tracer(&kmem_tracer); | ||
348 | } | ||
349 | |||
350 | device_initcall(init_kmem_tracer); | ||
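
The exported kmemtrace_mark_alloc_node() and kmemtrace_mark_free() hooks above are only the tracing back end; the calls into them are made from the slab allocators under mm/, which are not part of this patch. Once registered, the tracer is selected through the usual ftrace debugfs interface (current_tracer), and the kmem_minimalistic flag toggles the compact output produced by the *_compress() routines. As a rough sketch of the intended call pattern only, an allocator-side wrapper could look like the following; the *_traced() names are purely illustrative and do not exist in the tree:

#include <linux/kernel.h>	/* _RET_IP_ */
#include <linux/slab.h>
#include <trace/kmemtrace.h>

/* Sketch only: shows how an allocator wrapper would feed the hooks added
 * above. The real call sites are placed inside kmalloc()/kfree() and the
 * kmem_cache_* paths by the slab allocator patches. */
static inline void *kmalloc_node_traced(size_t size, gfp_t flags, int node)
{
	void *ret = kmalloc_node(size, flags, node);

	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
				  _RET_IP_,		/* call site */
				  ret,			/* pointer handed out */
				  size,			/* bytes requested */
				  ret ? ksize(ret) : 0,	/* bytes actually allocated */
				  flags, node);
	return ret;
}

static inline void kfree_traced(const void *ptr)
{
	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ptr);
	kfree(ptr);
}
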
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 76f34c0ef29c..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void) | |||
69 | { | 69 | { |
70 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); | 70 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
71 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(tracing_on); | ||
72 | 73 | ||
73 | /** | 74 | /** |
74 | * tracing_off - turn off all tracing buffers | 75 | * tracing_off - turn off all tracing buffers |
@@ -82,6 +83,7 @@ void tracing_off(void) | |||
82 | { | 83 | { |
83 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); | 84 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
84 | } | 85 | } |
86 | EXPORT_SYMBOL_GPL(tracing_off); | ||
85 | 87 | ||
86 | /** | 88 | /** |
87 | * tracing_off_permanent - permanently disable ring buffers | 89 | * tracing_off_permanent - permanently disable ring buffers |
@@ -111,12 +113,14 @@ u64 ring_buffer_time_stamp(int cpu) | |||
111 | 113 | ||
112 | return time; | 114 | return time; |
113 | } | 115 | } |
116 | EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); | ||
114 | 117 | ||
115 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) | 118 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) |
116 | { | 119 | { |
117 | /* Just stupid testing the normalize function and deltas */ | 120 | /* Just stupid testing the normalize function and deltas */ |
118 | *ts >>= DEBUG_SHIFT; | 121 | *ts >>= DEBUG_SHIFT; |
119 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); | ||
120 | 124 | ||
121 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) | 125 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) |
122 | #define RB_ALIGNMENT_SHIFT 2 | 126 | #define RB_ALIGNMENT_SHIFT 2 |
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event) | |||
166 | { | 170 | { |
167 | return rb_event_length(event); | 171 | return rb_event_length(event); |
168 | } | 172 | } |
173 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | ||
169 | 174 | ||
170 | /* inline for ring buffer fast paths */ | 175 | /* inline for ring buffer fast paths */ |
171 | static inline void * | 176 | static inline void * |
@@ -187,9 +192,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
187 | { | 192 | { |
188 | return rb_event_data(event); | 193 | return rb_event_data(event); |
189 | } | 194 | } |
195 | EXPORT_SYMBOL_GPL(ring_buffer_event_data); | ||
190 | 196 | ||
191 | #define for_each_buffer_cpu(buffer, cpu) \ | 197 | #define for_each_buffer_cpu(buffer, cpu) \ |
192 | for_each_cpu_mask(cpu, buffer->cpumask) | 198 | for_each_cpu(cpu, buffer->cpumask) |
193 | 199 | ||
194 | #define TS_SHIFT 27 | 200 | #define TS_SHIFT 27 |
195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 201 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
@@ -261,7 +267,7 @@ struct ring_buffer { | |||
261 | unsigned pages; | 267 | unsigned pages; |
262 | unsigned flags; | 268 | unsigned flags; |
263 | int cpus; | 269 | int cpus; |
264 | cpumask_t cpumask; | 270 | cpumask_var_t cpumask; |
265 | atomic_t record_disabled; | 271 | atomic_t record_disabled; |
266 | 272 | ||
267 | struct mutex mutex; | 273 | struct mutex mutex; |
@@ -427,7 +433,7 @@ extern int ring_buffer_page_too_big(void); | |||
427 | 433 | ||
428 | /** | 434 | /** |
429 | * ring_buffer_alloc - allocate a new ring_buffer | 435 | * ring_buffer_alloc - allocate a new ring_buffer |
430 | * @size: the size in bytes that is needed. | 436 | * @size: the size in bytes per cpu that is needed. |
431 | * @flags: attributes to set for the ring buffer. | 437 | * @flags: attributes to set for the ring buffer. |
432 | * | 438 | * |
433 | * Currently the only flag that is available is the RB_FL_OVERWRITE | 439 | * Currently the only flag that is available is the RB_FL_OVERWRITE |
@@ -452,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
452 | if (!buffer) | 458 | if (!buffer) |
453 | return NULL; | 459 | return NULL; |
454 | 460 | ||
461 | if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) | ||
462 | goto fail_free_buffer; | ||
463 | |||
455 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 464 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
456 | buffer->flags = flags; | 465 | buffer->flags = flags; |
457 | 466 | ||
@@ -459,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
459 | if (buffer->pages == 1) | 468 | if (buffer->pages == 1) |
460 | buffer->pages++; | 469 | buffer->pages++; |
461 | 470 | ||
462 | buffer->cpumask = cpu_possible_map; | 471 | cpumask_copy(buffer->cpumask, cpu_possible_mask); |
463 | buffer->cpus = nr_cpu_ids; | 472 | buffer->cpus = nr_cpu_ids; |
464 | 473 | ||
465 | bsize = sizeof(void *) * nr_cpu_ids; | 474 | bsize = sizeof(void *) * nr_cpu_ids; |
466 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), | 475 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), |
467 | GFP_KERNEL); | 476 | GFP_KERNEL); |
468 | if (!buffer->buffers) | 477 | if (!buffer->buffers) |
469 | goto fail_free_buffer; | 478 | goto fail_free_cpumask; |
470 | 479 | ||
471 | for_each_buffer_cpu(buffer, cpu) { | 480 | for_each_buffer_cpu(buffer, cpu) { |
472 | buffer->buffers[cpu] = | 481 | buffer->buffers[cpu] = |
@@ -486,10 +495,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
486 | } | 495 | } |
487 | kfree(buffer->buffers); | 496 | kfree(buffer->buffers); |
488 | 497 | ||
498 | fail_free_cpumask: | ||
499 | free_cpumask_var(buffer->cpumask); | ||
500 | |||
489 | fail_free_buffer: | 501 | fail_free_buffer: |
490 | kfree(buffer); | 502 | kfree(buffer); |
491 | return NULL; | 503 | return NULL; |
492 | } | 504 | } |
505 | EXPORT_SYMBOL_GPL(ring_buffer_alloc); | ||
493 | 506 | ||
494 | /** | 507 | /** |
495 | * ring_buffer_free - free a ring buffer. | 508 | * ring_buffer_free - free a ring buffer. |
@@ -503,8 +516,11 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
503 | for_each_buffer_cpu(buffer, cpu) | 516 | for_each_buffer_cpu(buffer, cpu) |
504 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 517 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
505 | 518 | ||
519 | free_cpumask_var(buffer->cpumask); | ||
520 | |||
506 | kfree(buffer); | 521 | kfree(buffer); |
507 | } | 522 | } |
523 | EXPORT_SYMBOL_GPL(ring_buffer_free); | ||
508 | 524 | ||
509 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | 525 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); |
510 | 526 | ||
@@ -680,6 +696,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
680 | mutex_unlock(&buffer->mutex); | 696 | mutex_unlock(&buffer->mutex); |
681 | return -ENOMEM; | 697 | return -ENOMEM; |
682 | } | 698 | } |
699 | EXPORT_SYMBOL_GPL(ring_buffer_resize); | ||
683 | 700 | ||
684 | static inline int rb_null_event(struct ring_buffer_event *event) | 701 | static inline int rb_null_event(struct ring_buffer_event *event) |
685 | { | 702 | { |
@@ -1274,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1274 | 1291 | ||
1275 | cpu = raw_smp_processor_id(); | 1292 | cpu = raw_smp_processor_id(); |
1276 | 1293 | ||
1277 | if (!cpu_isset(cpu, buffer->cpumask)) | 1294 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1278 | goto out; | 1295 | goto out; |
1279 | 1296 | ||
1280 | cpu_buffer = buffer->buffers[cpu]; | 1297 | cpu_buffer = buffer->buffers[cpu]; |
@@ -1304,6 +1321,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1304 | ftrace_preempt_enable(resched); | 1321 | ftrace_preempt_enable(resched); |
1305 | return NULL; | 1322 | return NULL; |
1306 | } | 1323 | } |
1324 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | ||
1307 | 1325 | ||
1308 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | 1326 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, |
1309 | struct ring_buffer_event *event) | 1327 | struct ring_buffer_event *event) |
@@ -1350,6 +1368,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1350 | 1368 | ||
1351 | return 0; | 1369 | return 0; |
1352 | } | 1370 | } |
1371 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); | ||
1353 | 1372 | ||
1354 | /** | 1373 | /** |
1355 | * ring_buffer_write - write data to the buffer without reserving | 1374 | * ring_buffer_write - write data to the buffer without reserving |
@@ -1385,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1385 | 1404 | ||
1386 | cpu = raw_smp_processor_id(); | 1405 | cpu = raw_smp_processor_id(); |
1387 | 1406 | ||
1388 | if (!cpu_isset(cpu, buffer->cpumask)) | 1407 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1389 | goto out; | 1408 | goto out; |
1390 | 1409 | ||
1391 | cpu_buffer = buffer->buffers[cpu]; | 1410 | cpu_buffer = buffer->buffers[cpu]; |
@@ -1411,6 +1430,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1411 | 1430 | ||
1412 | return ret; | 1431 | return ret; |
1413 | } | 1432 | } |
1433 | EXPORT_SYMBOL_GPL(ring_buffer_write); | ||
1414 | 1434 | ||
1415 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 1435 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
1416 | { | 1436 | { |
@@ -1437,6 +1457,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer) | |||
1437 | { | 1457 | { |
1438 | atomic_inc(&buffer->record_disabled); | 1458 | atomic_inc(&buffer->record_disabled); |
1439 | } | 1459 | } |
1460 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable); | ||
1440 | 1461 | ||
1441 | /** | 1462 | /** |
1442 | * ring_buffer_record_enable - enable writes to the buffer | 1463 | * ring_buffer_record_enable - enable writes to the buffer |
@@ -1449,6 +1470,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer) | |||
1449 | { | 1470 | { |
1450 | atomic_dec(&buffer->record_disabled); | 1471 | atomic_dec(&buffer->record_disabled); |
1451 | } | 1472 | } |
1473 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | ||
1452 | 1474 | ||
1453 | /** | 1475 | /** |
1454 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 1476 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer |
@@ -1464,12 +1486,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | |||
1464 | { | 1486 | { |
1465 | struct ring_buffer_per_cpu *cpu_buffer; | 1487 | struct ring_buffer_per_cpu *cpu_buffer; |
1466 | 1488 | ||
1467 | if (!cpu_isset(cpu, buffer->cpumask)) | 1489 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1468 | return; | 1490 | return; |
1469 | 1491 | ||
1470 | cpu_buffer = buffer->buffers[cpu]; | 1492 | cpu_buffer = buffer->buffers[cpu]; |
1471 | atomic_inc(&cpu_buffer->record_disabled); | 1493 | atomic_inc(&cpu_buffer->record_disabled); |
1472 | } | 1494 | } |
1495 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); | ||
1473 | 1496 | ||
1474 | /** | 1497 | /** |
1475 | * ring_buffer_record_enable_cpu - enable writes to the buffer | 1498 | * ring_buffer_record_enable_cpu - enable writes to the buffer |
@@ -1483,12 +1506,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | |||
1483 | { | 1506 | { |
1484 | struct ring_buffer_per_cpu *cpu_buffer; | 1507 | struct ring_buffer_per_cpu *cpu_buffer; |
1485 | 1508 | ||
1486 | if (!cpu_isset(cpu, buffer->cpumask)) | 1509 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1487 | return; | 1510 | return; |
1488 | 1511 | ||
1489 | cpu_buffer = buffer->buffers[cpu]; | 1512 | cpu_buffer = buffer->buffers[cpu]; |
1490 | atomic_dec(&cpu_buffer->record_disabled); | 1513 | atomic_dec(&cpu_buffer->record_disabled); |
1491 | } | 1514 | } |
1515 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | ||
1492 | 1516 | ||
1493 | /** | 1517 | /** |
1494 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 1518 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer |
@@ -1499,12 +1523,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | |||
1499 | { | 1523 | { |
1500 | struct ring_buffer_per_cpu *cpu_buffer; | 1524 | struct ring_buffer_per_cpu *cpu_buffer; |
1501 | 1525 | ||
1502 | if (!cpu_isset(cpu, buffer->cpumask)) | 1526 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1503 | return 0; | 1527 | return 0; |
1504 | 1528 | ||
1505 | cpu_buffer = buffer->buffers[cpu]; | 1529 | cpu_buffer = buffer->buffers[cpu]; |
1506 | return cpu_buffer->entries; | 1530 | return cpu_buffer->entries; |
1507 | } | 1531 | } |
1532 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); | ||
1508 | 1533 | ||
1509 | /** | 1534 | /** |
1510 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer | 1535 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer |
@@ -1515,12 +1540,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | |||
1515 | { | 1540 | { |
1516 | struct ring_buffer_per_cpu *cpu_buffer; | 1541 | struct ring_buffer_per_cpu *cpu_buffer; |
1517 | 1542 | ||
1518 | if (!cpu_isset(cpu, buffer->cpumask)) | 1543 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1519 | return 0; | 1544 | return 0; |
1520 | 1545 | ||
1521 | cpu_buffer = buffer->buffers[cpu]; | 1546 | cpu_buffer = buffer->buffers[cpu]; |
1522 | return cpu_buffer->overrun; | 1547 | return cpu_buffer->overrun; |
1523 | } | 1548 | } |
1549 | EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); | ||
1524 | 1550 | ||
1525 | /** | 1551 | /** |
1526 | * ring_buffer_entries - get the number of entries in a buffer | 1552 | * ring_buffer_entries - get the number of entries in a buffer |
@@ -1543,6 +1569,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
1543 | 1569 | ||
1544 | return entries; | 1570 | return entries; |
1545 | } | 1571 | } |
1572 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | ||
1546 | 1573 | ||
1547 | /** | 1574 | /** |
1548 | * ring_buffer_overrun_cpu - get the number of overruns in buffer | 1575 | * ring_buffer_overrun_cpu - get the number of overruns in buffer |
@@ -1565,6 +1592,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | |||
1565 | 1592 | ||
1566 | return overruns; | 1593 | return overruns; |
1567 | } | 1594 | } |
1595 | EXPORT_SYMBOL_GPL(ring_buffer_overruns); | ||
1568 | 1596 | ||
1569 | static void rb_iter_reset(struct ring_buffer_iter *iter) | 1597 | static void rb_iter_reset(struct ring_buffer_iter *iter) |
1570 | { | 1598 | { |
@@ -1600,6 +1628,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
1600 | rb_iter_reset(iter); | 1628 | rb_iter_reset(iter); |
1601 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 1629 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
1602 | } | 1630 | } |
1631 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | ||
1603 | 1632 | ||
1604 | /** | 1633 | /** |
1605 | * ring_buffer_iter_empty - check if an iterator has no more to read | 1634 | * ring_buffer_iter_empty - check if an iterator has no more to read |
@@ -1614,6 +1643,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter) | |||
1614 | return iter->head_page == cpu_buffer->commit_page && | 1643 | return iter->head_page == cpu_buffer->commit_page && |
1615 | iter->head == rb_commit_index(cpu_buffer); | 1644 | iter->head == rb_commit_index(cpu_buffer); |
1616 | } | 1645 | } |
1646 | EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); | ||
1617 | 1647 | ||
1618 | static void | 1648 | static void |
1619 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 1649 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
@@ -1828,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1828 | struct buffer_page *reader; | 1858 | struct buffer_page *reader; |
1829 | int nr_loops = 0; | 1859 | int nr_loops = 0; |
1830 | 1860 | ||
1831 | if (!cpu_isset(cpu, buffer->cpumask)) | 1861 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1832 | return NULL; | 1862 | return NULL; |
1833 | 1863 | ||
1834 | cpu_buffer = buffer->buffers[cpu]; | 1864 | cpu_buffer = buffer->buffers[cpu]; |
@@ -1880,6 +1910,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1880 | 1910 | ||
1881 | return NULL; | 1911 | return NULL; |
1882 | } | 1912 | } |
1913 | EXPORT_SYMBOL_GPL(ring_buffer_peek); | ||
1883 | 1914 | ||
1884 | static struct ring_buffer_event * | 1915 | static struct ring_buffer_event * |
1885 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 1916 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
@@ -1940,6 +1971,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1940 | 1971 | ||
1941 | return NULL; | 1972 | return NULL; |
1942 | } | 1973 | } |
1974 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); | ||
1943 | 1975 | ||
1944 | /** | 1976 | /** |
1945 | * ring_buffer_peek - peek at the next event to be read | 1977 | * ring_buffer_peek - peek at the next event to be read |
@@ -2001,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
2001 | struct ring_buffer_event *event; | 2033 | struct ring_buffer_event *event; |
2002 | unsigned long flags; | 2034 | unsigned long flags; |
2003 | 2035 | ||
2004 | if (!cpu_isset(cpu, buffer->cpumask)) | 2036 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2005 | return NULL; | 2037 | return NULL; |
2006 | 2038 | ||
2007 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2039 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
@@ -2017,6 +2049,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
2017 | 2049 | ||
2018 | return event; | 2050 | return event; |
2019 | } | 2051 | } |
2052 | EXPORT_SYMBOL_GPL(ring_buffer_consume); | ||
2020 | 2053 | ||
2021 | /** | 2054 | /** |
2022 | * ring_buffer_read_start - start a non consuming read of the buffer | 2055 | * ring_buffer_read_start - start a non consuming read of the buffer |
@@ -2037,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
2037 | struct ring_buffer_iter *iter; | 2070 | struct ring_buffer_iter *iter; |
2038 | unsigned long flags; | 2071 | unsigned long flags; |
2039 | 2072 | ||
2040 | if (!cpu_isset(cpu, buffer->cpumask)) | 2073 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2041 | return NULL; | 2074 | return NULL; |
2042 | 2075 | ||
2043 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | 2076 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); |
@@ -2059,6 +2092,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
2059 | 2092 | ||
2060 | return iter; | 2093 | return iter; |
2061 | } | 2094 | } |
2095 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | ||
2062 | 2096 | ||
2063 | /** | 2097 | /** |
2064 | * ring_buffer_finish - finish reading the iterator of the buffer | 2098 | * ring_buffer_finish - finish reading the iterator of the buffer |
@@ -2075,6 +2109,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter) | |||
2075 | atomic_dec(&cpu_buffer->record_disabled); | 2109 | atomic_dec(&cpu_buffer->record_disabled); |
2076 | kfree(iter); | 2110 | kfree(iter); |
2077 | } | 2111 | } |
2112 | EXPORT_SYMBOL_GPL(ring_buffer_read_finish); | ||
2078 | 2113 | ||
2079 | /** | 2114 | /** |
2080 | * ring_buffer_read - read the next item in the ring buffer by the iterator | 2115 | * ring_buffer_read - read the next item in the ring buffer by the iterator |
@@ -2101,6 +2136,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
2101 | 2136 | ||
2102 | return event; | 2137 | return event; |
2103 | } | 2138 | } |
2139 | EXPORT_SYMBOL_GPL(ring_buffer_read); | ||
2104 | 2140 | ||
2105 | /** | 2141 | /** |
2106 | * ring_buffer_size - return the size of the ring buffer (in bytes) | 2142 | * ring_buffer_size - return the size of the ring buffer (in bytes) |
@@ -2110,6 +2146,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer) | |||
2110 | { | 2146 | { |
2111 | return BUF_PAGE_SIZE * buffer->pages; | 2147 | return BUF_PAGE_SIZE * buffer->pages; |
2112 | } | 2148 | } |
2149 | EXPORT_SYMBOL_GPL(ring_buffer_size); | ||
2113 | 2150 | ||
2114 | static void | 2151 | static void |
2115 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | 2152 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) |
@@ -2143,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
2143 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 2180 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
2144 | unsigned long flags; | 2181 | unsigned long flags; |
2145 | 2182 | ||
2146 | if (!cpu_isset(cpu, buffer->cpumask)) | 2183 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2147 | return; | 2184 | return; |
2148 | 2185 | ||
2149 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2186 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
@@ -2156,6 +2193,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
2156 | 2193 | ||
2157 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2194 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2158 | } | 2195 | } |
2196 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); | ||
2159 | 2197 | ||
2160 | /** | 2198 | /** |
2161 | * ring_buffer_reset - reset a ring buffer | 2199 | * ring_buffer_reset - reset a ring buffer |
@@ -2168,6 +2206,7 @@ void ring_buffer_reset(struct ring_buffer *buffer) | |||
2168 | for_each_buffer_cpu(buffer, cpu) | 2206 | for_each_buffer_cpu(buffer, cpu) |
2169 | ring_buffer_reset_cpu(buffer, cpu); | 2207 | ring_buffer_reset_cpu(buffer, cpu); |
2170 | } | 2208 | } |
2209 | EXPORT_SYMBOL_GPL(ring_buffer_reset); | ||
2171 | 2210 | ||
2172 | /** | 2211 | /** |
2173 | * rind_buffer_empty - is the ring buffer empty? | 2212 | * rind_buffer_empty - is the ring buffer empty? |
@@ -2186,6 +2225,7 @@ int ring_buffer_empty(struct ring_buffer *buffer) | |||
2186 | } | 2225 | } |
2187 | return 1; | 2226 | return 1; |
2188 | } | 2227 | } |
2228 | EXPORT_SYMBOL_GPL(ring_buffer_empty); | ||
2189 | 2229 | ||
2190 | /** | 2230 | /** |
2191 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? | 2231 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? |
@@ -2196,12 +2236,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
2196 | { | 2236 | { |
2197 | struct ring_buffer_per_cpu *cpu_buffer; | 2237 | struct ring_buffer_per_cpu *cpu_buffer; |
2198 | 2238 | ||
2199 | if (!cpu_isset(cpu, buffer->cpumask)) | 2239 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2200 | return 1; | 2240 | return 1; |
2201 | 2241 | ||
2202 | cpu_buffer = buffer->buffers[cpu]; | 2242 | cpu_buffer = buffer->buffers[cpu]; |
2203 | return rb_per_cpu_empty(cpu_buffer); | 2243 | return rb_per_cpu_empty(cpu_buffer); |
2204 | } | 2244 | } |
2245 | EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); | ||
2205 | 2246 | ||
2206 | /** | 2247 | /** |
2207 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers | 2248 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers |
@@ -2219,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2219 | struct ring_buffer_per_cpu *cpu_buffer_a; | 2260 | struct ring_buffer_per_cpu *cpu_buffer_a; |
2220 | struct ring_buffer_per_cpu *cpu_buffer_b; | 2261 | struct ring_buffer_per_cpu *cpu_buffer_b; |
2221 | 2262 | ||
2222 | if (!cpu_isset(cpu, buffer_a->cpumask) || | 2263 | if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || |
2223 | !cpu_isset(cpu, buffer_b->cpumask)) | 2264 | !cpumask_test_cpu(cpu, buffer_b->cpumask)) |
2224 | return -EINVAL; | 2265 | return -EINVAL; |
2225 | 2266 | ||
2226 | /* At least make sure the two buffers are somewhat the same */ | 2267 | /* At least make sure the two buffers are somewhat the same */ |
@@ -2250,6 +2291,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2250 | 2291 | ||
2251 | return 0; | 2292 | return 0; |
2252 | } | 2293 | } |
2294 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | ||
2253 | 2295 | ||
2254 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | 2296 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, |
2255 | struct buffer_data_page *bpage) | 2297 | struct buffer_data_page *bpage) |
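
The ring_buffer.c part of the patch is the mechanical side of the cpumask conversion: cpu_isset()/cpu_possible_map users become cpumask_test_cpu()/cpu_possible_mask, and the cpumask_t embedded in struct ring_buffer becomes a cpumask_var_t, which must be explicitly allocated before use and freed on every exit path (hence the new fail_free_cpumask label). The same pattern in isolation, with a hypothetical demo_buffer structure standing in for struct ring_buffer:

#include <linux/cpumask.h>
#include <linux/slab.h>

struct demo_buffer {
	cpumask_var_t	cpumask;	/* was: cpumask_t cpumask; */
};

static struct demo_buffer *demo_buffer_alloc(void)
{
	struct demo_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return NULL;

	/* With CONFIG_CPUMASK_OFFSTACK this allocates a separate mask;
	 * otherwise it is effectively a no-op over an embedded mask. */
	if (!alloc_cpumask_var(&b->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	cpumask_copy(b->cpumask, cpu_possible_mask);
	return b;

 fail_free_buffer:
	kfree(b);
	return NULL;
}

static void demo_buffer_free(struct demo_buffer *b)
{
	free_cpumask_var(b->cpumask);	/* free the mask before the struct */
	kfree(b);
}
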
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b789c010512c..0418fc338b5c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
31 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
32 | #include <linux/kprobes.h> | 32 | #include <linux/kprobes.h> |
33 | #include <linux/seq_file.h> | ||
34 | #include <linux/writeback.h> | 33 | #include <linux/writeback.h> |
35 | 34 | ||
36 | #include <linux/stacktrace.h> | 35 | #include <linux/stacktrace.h> |
@@ -91,10 +90,10 @@ static inline void ftrace_enable_cpu(void) | |||
91 | preempt_enable(); | 90 | preempt_enable(); |
92 | } | 91 | } |
93 | 92 | ||
94 | static cpumask_t __read_mostly tracing_buffer_mask; | 93 | static cpumask_var_t __read_mostly tracing_buffer_mask; |
95 | 94 | ||
96 | #define for_each_tracing_cpu(cpu) \ | 95 | #define for_each_tracing_cpu(cpu) \ |
97 | for_each_cpu_mask(cpu, tracing_buffer_mask) | 96 | for_each_cpu(cpu, tracing_buffer_mask) |
98 | 97 | ||
99 | /* | 98 | /* |
100 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 99 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops |
@@ -1185,7 +1184,7 @@ enum trace_file_type { | |||
1185 | TRACE_FILE_ANNOTATE = 2, | 1184 | TRACE_FILE_ANNOTATE = 2, |
1186 | }; | 1185 | }; |
1187 | 1186 | ||
1188 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) | 1187 | static void trace_iterator_increment(struct trace_iterator *iter) |
1189 | { | 1188 | { |
1190 | /* Don't allow ftrace to trace into the ring buffers */ | 1189 | /* Don't allow ftrace to trace into the ring buffers */ |
1191 | ftrace_disable_cpu(); | 1190 | ftrace_disable_cpu(); |
@@ -1264,7 +1263,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter) | |||
1264 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); | 1263 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); |
1265 | 1264 | ||
1266 | if (iter->ent) | 1265 | if (iter->ent) |
1267 | trace_iterator_increment(iter, iter->cpu); | 1266 | trace_iterator_increment(iter); |
1268 | 1267 | ||
1269 | return iter->ent ? iter : NULL; | 1268 | return iter->ent ? iter : NULL; |
1270 | } | 1269 | } |
@@ -1493,10 +1492,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
1493 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | 1492 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) |
1494 | return; | 1493 | return; |
1495 | 1494 | ||
1496 | if (cpu_isset(iter->cpu, iter->started)) | 1495 | if (cpumask_test_cpu(iter->cpu, iter->started)) |
1497 | return; | 1496 | return; |
1498 | 1497 | ||
1499 | cpu_set(iter->cpu, iter->started); | 1498 | cpumask_set_cpu(iter->cpu, iter->started); |
1500 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | 1499 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); |
1501 | } | 1500 | } |
1502 | 1501 | ||
@@ -1987,13 +1986,7 @@ static struct file_operations show_traces_fops = { | |||
1987 | /* | 1986 | /* |
1988 | * Only trace on a CPU if the bitmask is set: | 1987 | * Only trace on a CPU if the bitmask is set: |
1989 | */ | 1988 | */ |
1990 | static cpumask_t tracing_cpumask = CPU_MASK_ALL; | 1989 | static cpumask_var_t tracing_cpumask; |
1991 | |||
1992 | /* | ||
1993 | * When tracing/tracing_cpu_mask is modified then this holds | ||
1994 | * the new bitmask we are about to install: | ||
1995 | */ | ||
1996 | static cpumask_t tracing_cpumask_new; | ||
1997 | 1990 | ||
1998 | /* | 1991 | /* |
1999 | * The tracer itself will not take this lock, but still we want | 1992 | * The tracer itself will not take this lock, but still we want |
@@ -2034,6 +2027,10 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2034 | size_t count, loff_t *ppos) | 2027 | size_t count, loff_t *ppos) |
2035 | { | 2028 | { |
2036 | int err, cpu; | 2029 | int err, cpu; |
2030 | cpumask_var_t tracing_cpumask_new; | ||
2031 | |||
2032 | if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) | ||
2033 | return -ENOMEM; | ||
2037 | 2034 | ||
2038 | mutex_lock(&tracing_cpumask_update_lock); | 2035 | mutex_lock(&tracing_cpumask_update_lock); |
2039 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); | 2036 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
@@ -2047,26 +2044,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2047 | * Increase/decrease the disabled counter if we are | 2044 | * Increase/decrease the disabled counter if we are |
2048 | * about to flip a bit in the cpumask: | 2045 | * about to flip a bit in the cpumask: |
2049 | */ | 2046 | */ |
2050 | if (cpu_isset(cpu, tracing_cpumask) && | 2047 | if (cpumask_test_cpu(cpu, tracing_cpumask) && |
2051 | !cpu_isset(cpu, tracing_cpumask_new)) { | 2048 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
2052 | atomic_inc(&global_trace.data[cpu]->disabled); | 2049 | atomic_inc(&global_trace.data[cpu]->disabled); |
2053 | } | 2050 | } |
2054 | if (!cpu_isset(cpu, tracing_cpumask) && | 2051 | if (!cpumask_test_cpu(cpu, tracing_cpumask) && |
2055 | cpu_isset(cpu, tracing_cpumask_new)) { | 2052 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
2056 | atomic_dec(&global_trace.data[cpu]->disabled); | 2053 | atomic_dec(&global_trace.data[cpu]->disabled); |
2057 | } | 2054 | } |
2058 | } | 2055 | } |
2059 | __raw_spin_unlock(&ftrace_max_lock); | 2056 | __raw_spin_unlock(&ftrace_max_lock); |
2060 | local_irq_enable(); | 2057 | local_irq_enable(); |
2061 | 2058 | ||
2062 | tracing_cpumask = tracing_cpumask_new; | 2059 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); |
2063 | 2060 | ||
2064 | mutex_unlock(&tracing_cpumask_update_lock); | 2061 | mutex_unlock(&tracing_cpumask_update_lock); |
2062 | free_cpumask_var(tracing_cpumask_new); | ||
2065 | 2063 | ||
2066 | return count; | 2064 | return count; |
2067 | 2065 | ||
2068 | err_unlock: | 2066 | err_unlock: |
2069 | mutex_unlock(&tracing_cpumask_update_lock); | 2067 | mutex_unlock(&tracing_cpumask_update_lock); |
2068 | free_cpumask_var(tracing_cpumask_new); | ||
2070 | 2069 | ||
2071 | return err; | 2070 | return err; |
2072 | } | 2071 | } |
@@ -2456,10 +2455,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2456 | if (!iter) | 2455 | if (!iter) |
2457 | return -ENOMEM; | 2456 | return -ENOMEM; |
2458 | 2457 | ||
2458 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | ||
2459 | kfree(iter); | ||
2460 | return -ENOMEM; | ||
2461 | } | ||
2462 | |||
2459 | mutex_lock(&trace_types_lock); | 2463 | mutex_lock(&trace_types_lock); |
2460 | 2464 | ||
2461 | /* trace pipe does not show start of buffer */ | 2465 | /* trace pipe does not show start of buffer */ |
2462 | cpus_setall(iter->started); | 2466 | cpumask_setall(iter->started); |
2463 | 2467 | ||
2464 | iter->tr = &global_trace; | 2468 | iter->tr = &global_trace; |
2465 | iter->trace = current_trace; | 2469 | iter->trace = current_trace; |
@@ -2476,6 +2480,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
2476 | { | 2480 | { |
2477 | struct trace_iterator *iter = file->private_data; | 2481 | struct trace_iterator *iter = file->private_data; |
2478 | 2482 | ||
2483 | free_cpumask_var(iter->started); | ||
2479 | kfree(iter); | 2484 | kfree(iter); |
2480 | atomic_dec(&tracing_reader); | 2485 | atomic_dec(&tracing_reader); |
2481 | 2486 | ||
@@ -3094,7 +3099,6 @@ void ftrace_dump(void) | |||
3094 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 3099 | static DEFINE_SPINLOCK(ftrace_dump_lock); |
3095 | /* use static because iter can be a bit big for the stack */ | 3100 | /* use static because iter can be a bit big for the stack */ |
3096 | static struct trace_iterator iter; | 3101 | static struct trace_iterator iter; |
3097 | static cpumask_t mask; | ||
3098 | static int dump_ran; | 3102 | static int dump_ran; |
3099 | unsigned long flags; | 3103 | unsigned long flags; |
3100 | int cnt = 0, cpu; | 3104 | int cnt = 0, cpu; |
@@ -3128,8 +3132,6 @@ void ftrace_dump(void) | |||
3128 | * and then release the locks again. | 3132 | * and then release the locks again. |
3129 | */ | 3133 | */ |
3130 | 3134 | ||
3131 | cpus_clear(mask); | ||
3132 | |||
3133 | while (!trace_empty(&iter)) { | 3135 | while (!trace_empty(&iter)) { |
3134 | 3136 | ||
3135 | if (!cnt) | 3137 | if (!cnt) |
@@ -3165,19 +3167,28 @@ __init static int tracer_alloc_buffers(void) | |||
3165 | { | 3167 | { |
3166 | struct trace_array_cpu *data; | 3168 | struct trace_array_cpu *data; |
3167 | int i; | 3169 | int i; |
3170 | int ret = -ENOMEM; | ||
3168 | 3171 | ||
3169 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 3172 | if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) |
3170 | tracing_buffer_mask = cpu_possible_map; | 3173 | goto out; |
3174 | |||
3175 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | ||
3176 | goto out_free_buffer_mask; | ||
3177 | |||
3178 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | ||
3179 | cpumask_copy(tracing_cpumask, cpu_all_mask); | ||
3171 | 3180 | ||
3181 | /* TODO: make the number of buffers hot pluggable with CPUS */ | ||
3172 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, | 3182 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, |
3173 | TRACE_BUFFER_FLAGS); | 3183 | TRACE_BUFFER_FLAGS); |
3174 | if (!global_trace.buffer) { | 3184 | if (!global_trace.buffer) { |
3175 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 3185 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
3176 | WARN_ON(1); | 3186 | WARN_ON(1); |
3177 | return 0; | 3187 | goto out_free_cpumask; |
3178 | } | 3188 | } |
3179 | global_trace.entries = ring_buffer_size(global_trace.buffer); | 3189 | global_trace.entries = ring_buffer_size(global_trace.buffer); |
3180 | 3190 | ||
3191 | |||
3181 | #ifdef CONFIG_TRACER_MAX_TRACE | 3192 | #ifdef CONFIG_TRACER_MAX_TRACE |
3182 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, | 3193 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, |
3183 | TRACE_BUFFER_FLAGS); | 3194 | TRACE_BUFFER_FLAGS); |
@@ -3185,7 +3196,7 @@ __init static int tracer_alloc_buffers(void) | |||
3185 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 3196 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
3186 | WARN_ON(1); | 3197 | WARN_ON(1); |
3187 | ring_buffer_free(global_trace.buffer); | 3198 | ring_buffer_free(global_trace.buffer); |
3188 | return 0; | 3199 | goto out_free_cpumask; |
3189 | } | 3200 | } |
3190 | max_tr.entries = ring_buffer_size(max_tr.buffer); | 3201 | max_tr.entries = ring_buffer_size(max_tr.buffer); |
3191 | WARN_ON(max_tr.entries != global_trace.entries); | 3202 | WARN_ON(max_tr.entries != global_trace.entries); |
@@ -3215,8 +3226,14 @@ __init static int tracer_alloc_buffers(void) | |||
3215 | &trace_panic_notifier); | 3226 | &trace_panic_notifier); |
3216 | 3227 | ||
3217 | register_die_notifier(&trace_die_notifier); | 3228 | register_die_notifier(&trace_die_notifier); |
3229 | ret = 0; | ||
3218 | 3230 | ||
3219 | return 0; | 3231 | out_free_cpumask: |
3232 | free_cpumask_var(tracing_cpumask); | ||
3233 | out_free_buffer_mask: | ||
3234 | free_cpumask_var(tracing_buffer_mask); | ||
3235 | out: | ||
3236 | return ret; | ||
3220 | } | 3237 | } |
3221 | early_initcall(tracer_alloc_buffers); | 3238 | early_initcall(tracer_alloc_buffers); |
3222 | fs_initcall(tracer_init_debugfs); | 3239 | fs_initcall(tracer_init_debugfs); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a8b624ccd4d6..94ed45e93a80 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <trace/boot.h> | 11 | #include <trace/boot.h> |
12 | #include <trace/kmemtrace.h> | ||
12 | 13 | ||
13 | enum trace_type { | 14 | enum trace_type { |
14 | __TRACE_FIRST_TYPE = 0, | 15 | __TRACE_FIRST_TYPE = 0, |
@@ -28,6 +29,8 @@ enum trace_type { | |||
28 | TRACE_GRAPH_ENT, | 29 | TRACE_GRAPH_ENT, |
29 | TRACE_USER_STACK, | 30 | TRACE_USER_STACK, |
30 | TRACE_HW_BRANCHES, | 31 | TRACE_HW_BRANCHES, |
32 | TRACE_KMEM_ALLOC, | ||
33 | TRACE_KMEM_FREE, | ||
31 | TRACE_POWER, | 34 | TRACE_POWER, |
32 | 35 | ||
33 | __TRACE_LAST_TYPE, | 36 | __TRACE_LAST_TYPE, |
@@ -169,6 +172,24 @@ struct trace_power { | |||
169 | struct power_trace state_data; | 172 | struct power_trace state_data; |
170 | }; | 173 | }; |
171 | 174 | ||
175 | struct kmemtrace_alloc_entry { | ||
176 | struct trace_entry ent; | ||
177 | enum kmemtrace_type_id type_id; | ||
178 | unsigned long call_site; | ||
179 | const void *ptr; | ||
180 | size_t bytes_req; | ||
181 | size_t bytes_alloc; | ||
182 | gfp_t gfp_flags; | ||
183 | int node; | ||
184 | }; | ||
185 | |||
186 | struct kmemtrace_free_entry { | ||
187 | struct trace_entry ent; | ||
188 | enum kmemtrace_type_id type_id; | ||
189 | unsigned long call_site; | ||
190 | const void *ptr; | ||
191 | }; | ||
192 | |||
172 | /* | 193 | /* |
173 | * trace_flag_type is an enumeration that holds different | 194 | * trace_flag_type is an enumeration that holds different |
174 | * states when a trace occurs. These are: | 195 | * states when a trace occurs. These are: |
@@ -276,6 +297,10 @@ extern void __ftrace_bad_type(void); | |||
276 | TRACE_GRAPH_RET); \ | 297 | TRACE_GRAPH_RET); \ |
277 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ | 298 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ |
278 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | 299 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ |
300 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | ||
301 | TRACE_KMEM_ALLOC); \ | ||
302 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | ||
303 | TRACE_KMEM_FREE); \ | ||
279 | __ftrace_bad_type(); \ | 304 | __ftrace_bad_type(); \ |
280 | } while (0) | 305 | } while (0) |
281 | 306 | ||
@@ -379,7 +404,7 @@ struct trace_iterator { | |||
379 | loff_t pos; | 404 | loff_t pos; |
380 | long idx; | 405 | long idx; |
381 | 406 | ||
382 | cpumask_t started; | 407 | cpumask_var_t started; |
383 | }; | 408 | }; |
384 | 409 | ||
385 | int tracing_is_enabled(void); | 410 | int tracing_is_enabled(void); |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index cb2ff3e297b1..0e94b3d091f7 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -43,7 +43,7 @@ static int boot_trace_init(struct trace_array *tr) | |||
43 | int cpu; | 43 | int cpu; |
44 | boot_trace = tr; | 44 | boot_trace = tr; |
45 | 45 | ||
46 | for_each_cpu_mask(cpu, cpu_possible_map) | 46 | for_each_cpu(cpu, cpu_possible_mask) |
47 | tracing_reset(tr, cpu); | 47 | tracing_reset(tr, cpu); |
48 | 48 | ||
49 | tracing_sched_switch_assign_trace(tr); | 49 | tracing_sched_switch_assign_trace(tr); |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f8ac5417afc8..3c545984816f 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -80,7 +80,7 @@ print_graph_cpu(struct trace_seq *s, int cpu) | |||
80 | int i; | 80 | int i; |
81 | int ret; | 81 | int ret; |
82 | int log10_this = log10_cpu(cpu); | 82 | int log10_this = log10_cpu(cpu); |
83 | int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); | 83 | int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); |
84 | 84 | ||
85 | 85 | ||
86 | /* | 86 | /* |
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 879752b006b3..df21c1e72b95 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -47,7 +47,7 @@ static void bts_trace_start(struct trace_array *tr) | |||
47 | 47 | ||
48 | tracing_reset_online_cpus(tr); | 48 | tracing_reset_online_cpus(tr); |
49 | 49 | ||
50 | for_each_cpu_mask(cpu, cpu_possible_map) | 50 | for_each_cpu(cpu, cpu_possible_mask) |
51 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | 51 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); |
52 | } | 52 | } |
53 | 53 | ||
@@ -63,7 +63,7 @@ static void bts_trace_stop(struct trace_array *tr) | |||
63 | { | 63 | { |
64 | int cpu; | 64 | int cpu; |
65 | 65 | ||
66 | for_each_cpu_mask(cpu, cpu_possible_map) | 66 | for_each_cpu(cpu, cpu_possible_mask) |
67 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | 67 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); |
68 | } | 68 | } |
69 | 69 | ||
@@ -173,7 +173,7 @@ static void trace_bts_prepare(struct trace_iterator *iter) | |||
173 | { | 173 | { |
174 | int cpu; | 174 | int cpu; |
175 | 175 | ||
176 | for_each_cpu_mask(cpu, cpu_possible_map) | 176 | for_each_cpu(cpu, cpu_possible_mask) |
177 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); | 177 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); |
178 | } | 178 | } |
179 | 179 | ||
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index b9b13c39b4bb..faa6ab7a1f5c 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -40,7 +40,7 @@ static int power_trace_init(struct trace_array *tr) | |||
40 | 40 | ||
41 | trace_power_enabled = 1; | 41 | trace_power_enabled = 1; |
42 | 42 | ||
43 | for_each_cpu_mask(cpu, cpu_possible_map) | 43 | for_each_cpu(cpu, cpu_possible_mask) |
44 | tracing_reset(tr, cpu); | 44 | tracing_reset(tr, cpu); |
45 | return 0; | 45 | return 0; |
46 | } | 46 | } |
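
The trace_sysprof.c hunk below drops the old trick of migrating the current task to each online CPU via set_cpus_allowed_ptr() and instead uses a single on_each_cpu() call, which runs the callback on every online CPU from IPI context; the callback therefore takes a void * argument and reaches its per-CPU hrtimer through __get_cpu_var() rather than per_cpu(..., cpu). The same pattern in isolation (the demo_* names are illustrative only):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, demo_counter);

/* Runs on every online CPU; called from IPI context, so it must not sleep. */
static void demo_bump(void *unused)
{
	__get_cpu_var(demo_counter)++;
}

static void demo_bump_all_cpus(void)
{
	/* third argument == 1: wait until every CPU has run the callback */
	on_each_cpu(demo_bump, NULL, 1);
}
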
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 01becf1f19ff..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,27 +196,19 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer) | |||
196 | return HRTIMER_RESTART; | 196 | return HRTIMER_RESTART; |
197 | } | 197 | } |
198 | 198 | ||
199 | static void start_stack_timer(int cpu) | 199 | static void start_stack_timer(void *unused) |
200 | { | 200 | { |
201 | struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); | 201 | struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); |
202 | 202 | ||
203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
204 | hrtimer->function = stack_trace_timer_fn; | 204 | hrtimer->function = stack_trace_timer_fn; |
205 | hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
206 | 205 | ||
207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); | 206 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); |
208 | } | 207 | } |
209 | 208 | ||
210 | static void start_stack_timers(void) | 209 | static void start_stack_timers(void) |
211 | { | 210 | { |
212 | cpumask_t saved_mask = current->cpus_allowed; | 211 | on_each_cpu(start_stack_timer, NULL, 1); |
213 | int cpu; | ||
214 | |||
215 | for_each_online_cpu(cpu) { | ||
216 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
217 | start_stack_timer(cpu); | ||
218 | } | ||
219 | set_cpus_allowed_ptr(current, &saved_mask); | ||
220 | } | 212 | } |
221 | 213 | ||
222 | static void stop_stack_timer(int cpu) | 214 | static void stop_stack_timer(int cpu) |