Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 |  17
-rw-r--r--  kernel/trace/Makefile                |   2
-rw-r--r--  kernel/trace/ftrace.c                |  15
-rw-r--r--  kernel/trace/ring_buffer.c           | 103
-rw-r--r--  kernel/trace/trace.c                 | 156
-rw-r--r--  kernel/trace/trace.h                 |  20
-rw-r--r--  kernel/trace/trace_boot.c            |  14
-rw-r--r--  kernel/trace/trace_bts.c             | 276
-rw-r--r--  kernel/trace/trace_functions.c       |  14
-rw-r--r--  kernel/trace/trace_functions_graph.c |  68
-rw-r--r--  kernel/trace/trace_hw_branches.c     | 195
-rw-r--r--  kernel/trace/trace_mmiotrace.c       |   6
-rw-r--r--  kernel/trace/trace_power.c           |   2
-rw-r--r--  kernel/trace/trace_sched_switch.c    |  17
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    |   2
-rw-r--r--  kernel/trace/trace_stack.c           |  49
-rw-r--r--  kernel/trace/trace_sysprof.c         |  26
17 files changed, 533 insertions(+), 449 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index bde6f03512d5..e2a4ff6fc3a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -244,16 +244,21 @@ config STACK_TRACER
 
 	  This tracer works by hooking into every function call that the
 	  kernel executes, and keeping a maximum stack depth value and
-	  stack-trace saved. Because this logic has to execute in every
-	  kernel function, all the time, this option can slow down the
-	  kernel measurably and is generally intended for kernel
-	  developers only.
+	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
+	  then it will not have any overhead while the stack tracer
+	  is disabled.
+
+	  To enable the stack tracer on bootup, pass in 'stacktrace'
+	  on the kernel command line.
+
+	  The stack tracer can also be enabled or disabled via the
+	  sysctl kernel.stack_tracer_enabled
 
 	  Say N if unsure.
 
-config BTS_TRACER
+config HW_BRANCH_TRACER
 	depends on HAVE_HW_BRANCH_TRACER
-	bool "Trace branches"
+	bool "Trace hw branches"
 	select TRACING
 	help
 	  This tracer records all branches on the system in a circular
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 62dc561b6676..349d5a93653f 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_BTS_TRACER) += trace_bts.o
+obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_POWER_TRACER) += trace_power.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a12f80efceaa..2f32969c09df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1047,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
 	int type = MATCH_FULL;
 	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	unsigned i, match = 0, search_len = 0;
+	int not = 0;
+
+	if (buff[0] == '!') {
+		not = 1;
+		buff++;
+		len--;
+	}
 
 	for (i = 0; i < len; i++) {
 		if (buff[i] == '*') {
@@ -1100,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable)
 				matched = 1;
 				break;
 			}
-			if (matched)
-				rec->flags |= flag;
+			if (matched) {
+				if (not)
+					rec->flags &= ~flag;
+				else
+					rec->flags |= flag;
+			}
 		}
 		pg = pg->next;
 	}
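
The hunks above add a '!' prefix to the filter syntax: a leading bang now clears FTRACE_FL_FILTER (or FTRACE_FL_NOTRACE) on matching records instead of setting it. Below is a minimal user-space sketch of just that negation logic; struct rec, match_one() and the flag value are stand-ins for illustration, not the kernel's types:

#include <stdio.h>
#include <string.h>

#define FTRACE_FL_FILTER 0x1            /* stand-in for the kernel flag bit */

struct rec { const char *name; unsigned long flags; };

/* set or clear `flag` on a record whose name matches buff; '!' inverts */
static void match_one(struct rec *r, const char *buff, unsigned long flag)
{
	int not = 0;

	if (buff[0] == '!') {           /* leading '!' means "remove the match" */
		not = 1;
		buff++;
	}
	if (strcmp(r->name, buff))
		return;
	if (not)
		r->flags &= ~flag;      /* negated: clear the flag */
	else
		r->flags |= flag;       /* normal: set the flag */
}

int main(void)
{
	struct rec r = { "sched_clock", 0 };

	match_one(&r, "sched_clock", FTRACE_FL_FILTER);   /* sets 0x1 */
	match_one(&r, "!sched_clock", FTRACE_FL_FILTER);  /* clears it again */
	printf("flags=%#lx\n", r.flags);                  /* prints flags=0 */
	return 0;
}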
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7f69cfeaadf7..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
 {
 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
 {
 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -107,16 +109,18 @@ u64 ring_buffer_time_stamp(int cpu)
 	preempt_disable_notrace();
 	/* shift to debug/test normalization and TIME_EXTENTS */
 	time = sched_clock() << DEBUG_SHIFT;
-	preempt_enable_notrace();
+	preempt_enable_no_resched_notrace();
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
 	return rb_event_length(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -187,9 +192,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -258,11 +264,10 @@ struct ring_buffer_per_cpu {
 };
 
 struct ring_buffer {
-	unsigned long		size;
 	unsigned		pages;
 	unsigned		flags;
 	int			cpus;
-	cpumask_t		cpumask;
+	cpumask_var_t		cpumask;
 	atomic_t		record_disabled;
 
 	struct mutex		mutex;
@@ -428,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -453,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 
@@ -460,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -487,10 +495,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
 fail_free_buffer:
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -504,8 +516,11 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
@@ -681,6 +696,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -839,6 +855,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * back to us). This allows us to do a simple loop to
 	 * assign the commit to the tail.
 	 */
+ again:
 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
 		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
@@ -854,6 +871,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 			cpu_buffer->commit_page->write;
 		barrier();
 	}
+
+	/* again, keep gcc from optimizing */
+	barrier();
+
+	/*
+	 * If an interrupt came in just after the first while loop
+	 * and pushed the tail page forward, we will be left with
+	 * a dangling commit that will never go forward.
+	 */
+	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+		goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -951,12 +979,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
@@ -982,7 +1013,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		 * it all the way around the buffer, bail, and warn
 		 * about it.
 		 */
-		if (unlikely(next_page == cpu_buffer->commit_page)) {
+		if (unlikely(next_page == commit_page)) {
 			WARN_ON_ONCE(1);
 			goto out_unlock;
 		}
@@ -1260,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1290,6 +1321,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1336,6 +1368,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1371,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1397,6 +1430,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1423,6 +1457,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1435,6 +1470,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1450,12 +1486,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1469,12 +1506,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1485,12 +1523,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1501,12 +1540,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1529,6 +1569,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1551,6 +1592,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
@@ -1586,6 +1628,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1600,6 +1643,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1814,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1866,6 +1910,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1926,6 +1971,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_peek - peek at the next event to be read
@@ -1987,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2003,6 +2049,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -2023,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2045,6 +2092,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2061,6 +2109,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2087,6 +2136,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2096,6 +2146,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2129,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2142,6 +2193,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2154,6 +2206,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2172,6 +2225,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2182,12 +2236,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2205,13 +2260,12 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */
-	if (buffer_a->size != buffer_b->size ||
-	    buffer_a->pages != buffer_b->pages)
+	if (buffer_a->pages != buffer_b->pages)
 		return -EINVAL;
 
 	cpu_buffer_a = buffer_a->buffers[cpu];
@@ -2237,6 +2291,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 			      struct buffer_data_page *bpage)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6adf660fc816..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/kprobes.h>
-#include <linux/seq_file.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
@@ -90,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_t __read_mostly		tracing_buffer_mask;
+static cpumask_var_t __read_mostly	tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -287,6 +286,7 @@ static const char *trace_options[] = {
 	"annotate",
 	"userstacktrace",
 	"sym-userobj",
+	"printk-msg-only",
 	NULL
 };
 
@@ -320,7 +320,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
 	data->pid = tsk->pid;
-	data->uid = tsk->uid;
+	data->uid = task_uid(tsk);
 	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 	data->policy = tsk->policy;
 	data->rt_priority = tsk->rt_priority;
@@ -678,6 +678,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
 	ftrace_enable_cpu();
 }
 
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+	int cpu;
+
+	tr->time_start = ftrace_now(tr->cpu);
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+}
+
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -1299,7 +1309,7 @@ enum trace_file_type {
 	TRACE_FILE_ANNOTATE	= 2,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
@@ -1378,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
 	if (iter->ent)
-		trace_iterator_increment(iter, iter->cpu);
+		trace_iterator_increment(iter);
 
 	return iter->ent ? iter : NULL;
 }
@@ -1747,6 +1757,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
+static int task_state_char(unsigned long state)
+{
+	int bit = state ? __ffs(state) + 1 : 0;
+
+	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
 /*
  * The message is supposed to contain an ending newline.
  * If the printing stops prematurely, try to add a newline of our own.
@@ -1794,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 		return;
 
-	if (cpu_isset(iter->cpu, iter->started))
+	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
-	cpu_set(iter->cpu, iter->started);
+	cpumask_set_cpu(iter->cpu, iter->started);
 	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
@@ -1815,7 +1832,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 	char *comm;
 	int S, T;
 	int i;
-	unsigned state;
 
 	if (entry->type == TRACE_CONT)
 		return TRACE_TYPE_HANDLED;
@@ -1861,12 +1877,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 
 	trace_assign_type(field, entry);
 
-	T = field->next_state < sizeof(state_to_char) ?
-		state_to_char[field->next_state] : 'X';
-
-	state = field->prev_state ?
-		__ffs(field->prev_state) + 1 : 0;
-	S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
+	T = task_state_char(field->next_state);
+	S = task_state_char(field->prev_state);
 	comm = trace_find_cmdline(field->next_pid);
 	trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
 		 field->prev_pid,
@@ -2007,10 +2019,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	S = field->prev_state < sizeof(state_to_char) ?
-		state_to_char[field->prev_state] : 'X';
-	T = field->next_state < sizeof(state_to_char) ?
-		state_to_char[field->next_state] : 'X';
+	T = task_state_char(field->next_state);
+	S = task_state_char(field->prev_state);
 	ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
 			       field->prev_pid,
 			       field->prev_prio,
@@ -2140,12 +2150,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	S = field->prev_state < sizeof(state_to_char) ?
-		state_to_char[field->prev_state] : 'X';
-	T = field->next_state < sizeof(state_to_char) ?
-		state_to_char[field->next_state] : 'X';
-	if (entry->type == TRACE_WAKE)
-		S = '+';
+	T = task_state_char(field->next_state);
+	S = entry->type == TRACE_WAKE ? '+' :
+		task_state_char(field->prev_state);
 	ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
 			       field->prev_pid,
 			       field->prev_prio,
@@ -2232,12 +2239,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	S = field->prev_state < sizeof(state_to_char) ?
-		state_to_char[field->prev_state] : 'X';
-	T = field->next_state < sizeof(state_to_char) ?
-		state_to_char[field->next_state] : 'X';
-	if (entry->type == TRACE_WAKE)
-		S = '+';
+	T = task_state_char(field->next_state);
+	S = entry->type == TRACE_WAKE ? '+' :
+		task_state_char(field->prev_state);
 	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
 	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
 	SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -2265,6 +2269,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 	return TRACE_TYPE_HANDLED;
 }
 
+static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct print_entry *field;
+	int ret;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_printf(s, field->buf);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (entry->flags & TRACE_FLAG_CONT)
+		trace_seq_print_cont(s, iter);
+
+	return TRACE_TYPE_HANDLED;
+}
+
 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -2345,6 +2368,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 		return ret;
 	}
 
+	if (iter->ent->type == TRACE_PRINT &&
+			trace_flags & TRACE_ITER_PRINTK &&
+			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+		return print_printk_msg_only(iter);
+
 	if (trace_flags & TRACE_ITER_BIN)
 		return print_bin_fmt(iter);
 
@@ -2425,7 +2453,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 
 	/* Notify the tracer early; before we stop tracing. */
 	if (iter->trace && iter->trace->open)
-			iter->trace->open(iter);
+		iter->trace->open(iter);
 
 	/* Annotate start of buffers if we had overruns */
 	if (ring_buffer_overruns(iter->tr->buffer))
@@ -2618,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2646,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -2665,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
 
@@ -2678,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&global_trace.data[cpu]->disabled);
 		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
 err_unlock:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask);
 
 	return err;
 }
@@ -3086,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (!iter)
 		return -ENOMEM;
 
+	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+		kfree(iter);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&trace_types_lock);
 
 	/* trace pipe does not show start of buffer */
-	cpus_setall(iter->started);
+	cpumask_setall(iter->started);
 
 	iter->tr = &global_trace;
 	iter->trace = current_trace;
@@ -3106,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
 	struct trace_iterator *iter = file->private_data;
 
+	free_cpumask_var(iter->started);
 	kfree(iter);
 	atomic_dec(&tracing_reader);
 
@@ -3724,7 +3758,6 @@ void ftrace_dump(void)
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static cpumask_t mask;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -3758,8 +3791,6 @@ void ftrace_dump(void)
 	 * and then release the locks again.
 	 */
 
-	cpus_clear(mask);
-
 	while (!trace_empty(&iter)) {
 
 		if (!cnt)
@@ -3795,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
 	int i;
+	int ret = -ENOMEM;
 
-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
 
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
 						   TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
 					     TRACE_BUFFER_FLAGS);
@@ -3815,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
 	}
 	max_tr.entries = ring_buffer_size(max_tr.buffer);
 	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3845,8 +3885,14 @@ __init static int tracer_alloc_buffers(void)
 					       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
+	ret = 0;
 
-	return 0;
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
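
The repeated `state < sizeof(state_to_char) ? ... : 'X'` expressions in trace.c collapse into the single task_state_char() helper, which also indexes by the first set state bit instead of the raw bitmask. A stand-alone sketch of that helper, using an abridged state string and user-space ffs() in place of the kernel's __ffs() (assumptions, not the kernel definitions):

#include <stdio.h>
#include <strings.h>                    /* ffs() */

static const char state_to_char[] = "RSDTtZX";  /* abridged TASK_STATE_TO_CHAR_STR */

static int task_state_char(unsigned long state)
{
	/* ffs() is 1-based, matching the kernel's __ffs(state) + 1 */
	int bit = state ? ffs(state) : 0;

	return bit < (int)sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

int main(void)
{
	printf("%c\n", task_state_char(0));          /* running (no bits)     -> R */
	printf("%c\n", task_state_char(1));          /* TASK_INTERRUPTIBLE    -> S */
	printf("%c\n", task_state_char(1UL << 20));  /* bit past known states -> ? */
	return 0;
}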
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5ac697065a48..4d3d381bfd95 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -28,7 +28,7 @@ enum trace_type {
 	TRACE_GRAPH_RET,
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
-	TRACE_BTS,
+	TRACE_HW_BRANCHES,
 	TRACE_POWER,
 
 	__TRACE_LAST_TYPE
@@ -159,10 +159,10 @@ struct trace_branch {
 	char			correct;
 };
 
-struct bts_entry {
+struct hw_branch_entry {
 	struct trace_entry	ent;
-	unsigned long		from;
-	unsigned long		to;
+	u64			from;
+	u64			to;
 };
 
 struct trace_power {
@@ -278,7 +278,7 @@ extern void __ftrace_bad_type(void);
 			  TRACE_GRAPH_ENT);		\
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);		\
-		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
+		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
 		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
 		__ftrace_bad_type();					\
 	} while (0)
@@ -368,12 +368,13 @@ struct trace_iterator {
 	loff_t			pos;
 	long			idx;
 
-	cpumask_t		started;
+	cpumask_var_t		started;
 };
 
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
@@ -414,9 +415,7 @@ void trace_function(struct trace_array *tr,
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_bts(struct trace_array *tr,
-	       unsigned long from,
-	       unsigned long to);
+void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -580,7 +579,8 @@ enum trace_iterator_flags {
 	TRACE_ITER_BRANCH		= 0x1000,
 	TRACE_ITER_ANNOTATE		= 0x2000,
 	TRACE_ITER_USERSTACKTRACE       = 0x4000,
-	TRACE_ITER_SYM_USEROBJ          = 0x8000
+	TRACE_ITER_SYM_USEROBJ          = 0x8000,
+	TRACE_ITER_PRINTK_MSGONLY	= 0x10000
 };
 
 /*
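
TRACE_ITER_PRINTK_MSGONLY extends the iterator-flags bitmask past 16 bits, and print_trace_line() in trace.c takes the msg-only path only when it is set together with TRACE_ITER_PRINTK. A tiny sketch of that two-bit test; the 0x400 value for TRACE_ITER_PRINTK is an assumption about the earlier, unshown part of the enum:

#include <stdio.h>

enum trace_iterator_flags {
	TRACE_ITER_PRINTK         = 0x400,    /* assumed value, see lead-in */
	TRACE_ITER_PRINTK_MSGONLY = 0x10000,  /* new bit added by this patch */
};

int main(void)
{
	unsigned long trace_flags = TRACE_ITER_PRINTK | TRACE_ITER_PRINTK_MSGONLY;

	/* both bits must be set before the raw-message path is chosen */
	if ((trace_flags & TRACE_ITER_PRINTK) &&
	    (trace_flags & TRACE_ITER_PRINTK_MSGONLY))
		printf("would call print_printk_msg_only()\n");
	return 0;
}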
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a4fa2c57e34e..366c8c333e13 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,22 +37,12 @@ void disable_boot_trace(void)
 	tracing_stop_sched_switch_record();
 }
 
-static void reset_boot_trace(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
 static int boot_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	boot_trace = tr;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		tracing_reset(tr, cpu);
 
 	tracing_sched_switch_assign_trace(tr);
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
 {
 	.name		= "initcall",
 	.init		= boot_trace_init,
-	.reset		= tracing_reset_online_cpus,
+	.reset		= tracing_reset_online_cpus,
 	.print_line	= initcall_print_line,
 };
 
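
This is the consumer side of the consolidation: the private reset_boot_trace() copy disappears and the tracer's .reset callback points at the shared tracing_reset_online_cpus() added in trace.c. A user-space sketch of the pattern, with hypothetical minimal stand-in types:

#include <stdio.h>

struct trace_array { unsigned long long time_start; };

/* shared helper; stands in for tracing_reset_online_cpus() in trace.c */
static void tracing_reset_online_cpus(struct trace_array *tr)
{
	tr->time_start = 0;                    /* ftrace_now(tr->cpu) in the kernel */
	printf("reset buffers on all online cpus\n");
}

struct tracer {
	const char *name;
	void (*reset)(struct trace_array *tr);
};

static struct tracer boot_tracer = {
	.name  = "initcall",
	.reset = tracing_reset_online_cpus,    /* shared, not a per-tracer copy */
};

int main(void)
{
	struct trace_array tr = { 42 };

	boot_tracer.reset(&tr);
	return 0;
}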
diff --git a/kernel/trace/trace_bts.c b/kernel/trace/trace_bts.c
deleted file mode 100644
index 23b76e4690ef..000000000000
--- a/kernel/trace/trace_bts.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * BTS tracer
- *
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-
-#include <asm/ds.h>
-
-#include "trace.h"
-
-
-#define SIZEOF_BTS (1 << 13)
-
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
-
-#define this_tracer per_cpu(tracer, smp_processor_id())
-#define this_buffer per_cpu(buffer, smp_processor_id())
-
-
-/*
- * Information to interpret a BTS record.
- * This will go into an in-kernel BTS interface.
- */
-static unsigned char sizeof_field;
-static unsigned long debugctl_mask;
-
-#define sizeof_bts (3 * sizeof_field)
-
-static void bts_trace_cpuinit(struct cpuinfo_x86 *c)
-{
-	switch (c->x86) {
-	case 0x6:
-		switch (c->x86_model) {
-		case 0x0 ... 0xC:
-			break;
-		case 0xD:
-		case 0xE: /* Pentium M */
-			sizeof_field = sizeof(long);
-			debugctl_mask = (1<<6)|(1<<7);
-			break;
-		default:
-			sizeof_field = 8;
-			debugctl_mask = (1<<6)|(1<<7);
-			break;
-		}
-		break;
-	case 0xF:
-		switch (c->x86_model) {
-		case 0x0:
-		case 0x1:
-		case 0x2: /* Netburst */
-			sizeof_field = sizeof(long);
-			debugctl_mask = (1<<2)|(1<<3);
-			break;
-		default:
-			/* sorry, don't know about them */
-			break;
-		}
-		break;
-	default:
-		/* sorry, don't know about them */
-		break;
-	}
-}
-
-static inline void bts_enable(void)
-{
-	unsigned long debugctl;
-
-	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
-	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | debugctl_mask);
-}
-
-static inline void bts_disable(void)
-{
-	unsigned long debugctl;
-
-	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
-	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl & ~debugctl_mask);
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	tr->time_start = ftrace_now(tr->cpu);
-
-	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
-}
-
-static void bts_trace_start_cpu(void *arg)
-{
-	this_tracer =
-		ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
-			       /* ovfl = */ NULL, /* th = */ (size_t)-1);
-	if (IS_ERR(this_tracer)) {
-		this_tracer = NULL;
-		return;
-	}
-
-	bts_enable();
-}
-
-static void bts_trace_start(struct trace_array *tr)
-{
-	int cpu;
-
-	bts_trace_reset(tr);
-
-	for_each_cpu_mask(cpu, cpu_possible_map)
-		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
-}
-
-static void bts_trace_stop_cpu(void *arg)
-{
-	if (this_tracer) {
-		bts_disable();
-
-		ds_release_bts(this_tracer);
-		this_tracer = NULL;
-	}
-}
-
-static void bts_trace_stop(struct trace_array *tr)
-{
-	int cpu;
-
-	for_each_cpu_mask(cpu, cpu_possible_map)
-		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
-}
-
-static int bts_trace_init(struct trace_array *tr)
-{
-	bts_trace_cpuinit(&boot_cpu_data);
-	bts_trace_reset(tr);
-	bts_trace_start(tr);
-
-	return 0;
-}
-
-static void bts_trace_print_header(struct seq_file *m)
-{
-#ifdef __i386__
-	seq_puts(m, "# CPU#    FROM       TO     FUNCTION\n");
-	seq_puts(m, "#  |       |          |        |\n");
-#else
-	seq_puts(m,
-		 "# CPU#    FROM           TO         FUNCTION\n");
-	seq_puts(m,
-		 "#  |       |              |             |\n");
-#endif
-}
-
-static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
-{
-	struct trace_entry *entry = iter->ent;
-	struct trace_seq *seq = &iter->seq;
-	struct bts_entry *it;
-
-	trace_assign_type(it, entry);
-
-	if (entry->type == TRACE_BTS) {
-		int ret;
-#ifdef CONFIG_KALLSYMS
-		char function[KSYM_SYMBOL_LEN];
-		sprint_symbol(function, it->from);
-#else
-		char *function = "<unknown>";
-#endif
-
-		ret = trace_seq_printf(seq, "%4d 0x%lx -> 0x%lx [%s]\n",
-				       entry->cpu, it->from, it->to, function);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;;
-		return TRACE_TYPE_HANDLED;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-void trace_bts(struct trace_array *tr, unsigned long from, unsigned long to)
-{
-	struct ring_buffer_event *event;
-	struct bts_entry *entry;
-	unsigned long irq;
-
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, from);
-	entry->ent.type = TRACE_BTS;
-	entry->ent.cpu = smp_processor_id();
-	entry->from = from;
-	entry->to = to;
-	ring_buffer_unlock_commit(tr->buffer, event, irq);
-}
-
-static void trace_bts_at(struct trace_array *tr, size_t index)
-{
-	const void *raw = NULL;
-	unsigned long from, to;
-	int err;
-
-	err = ds_access_bts(this_tracer, index, &raw);
-	if (err < 0)
-		return;
-
-	from = *(const unsigned long *)raw;
-	to = *(const unsigned long *)((const char *)raw + sizeof_field);
-
-	trace_bts(tr, from, to);
-}
-
-static void trace_bts_cpu(void *arg)
-{
-	struct trace_array *tr = (struct trace_array *) arg;
-	size_t index = 0, end = 0, i;
-	int err;
-
-	if (!this_tracer)
-		return;
-
-	bts_disable();
-
-	err = ds_get_bts_index(this_tracer, &index);
-	if (err < 0)
-		goto out;
-
-	err = ds_get_bts_end(this_tracer, &end);
-	if (err < 0)
-		goto out;
-
-	for (i = index; i < end; i++)
-		trace_bts_at(tr, i);
-
-	for (i = 0; i < index; i++)
-		trace_bts_at(tr, i);
-
-out:
-	bts_enable();
-}
-
-static void trace_bts_prepare(struct trace_iterator *iter)
-{
-	int cpu;
-
-	for_each_cpu_mask(cpu, cpu_possible_map)
-		smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
-}
-
-struct tracer bts_tracer __read_mostly =
-{
-	.name		= "bts",
-	.init		= bts_trace_init,
-	.reset		= bts_trace_stop,
-	.print_header	= bts_trace_print_header,
-	.print_line	= bts_trace_print_line,
-	.start		= bts_trace_start,
-	.stop		= bts_trace_stop,
-	.open		= trace_bts_prepare
-};
-
-__init static int init_bts_trace(void)
-{
-	return register_tracer(&bts_tracer);
-}
-device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e74f6d0a3216..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
16 16
17#include "trace.h" 17#include "trace.h"
18 18
19static void function_reset(struct trace_array *tr)
20{
21 int cpu;
22
23 tr->time_start = ftrace_now(tr->cpu);
24
25 for_each_online_cpu(cpu)
26 tracing_reset(tr, cpu);
27}
28
29static void start_function_trace(struct trace_array *tr) 19static void start_function_trace(struct trace_array *tr)
30{ 20{
31 tr->cpu = get_cpu(); 21 tr->cpu = get_cpu();
32 function_reset(tr); 22 tracing_reset_online_cpus(tr);
33 put_cpu(); 23 put_cpu();
34 24
35 tracing_start_cmdline_record(); 25 tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)
55 45
56static void function_trace_start(struct trace_array *tr) 46static void function_trace_start(struct trace_array *tr)
57{ 47{
58 function_reset(tr); 48 tracing_reset_online_cpus(tr);
59} 49}
60 50
61static struct tracer function_trace __read_mostly = 51static struct tracer function_trace __read_mostly =
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index af60eef4cbcc..930c08e5b38e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
79 int i; 79 int i;
80 int ret; 80 int ret;
81 int log10_this = log10_cpu(cpu); 81 int log10_this = log10_cpu(cpu);
82 int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); 82 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
83 83
84 84
85 /* 85 /*
@@ -231,6 +231,49 @@ trace_branch_is_leaf(struct trace_iterator *iter,
231 return true; 231 return true;
232} 232}
233 233
234static enum print_line_t
235print_graph_irq(struct trace_seq *s, unsigned long addr,
236 enum trace_type type, int cpu, pid_t pid)
237{
238 int ret;
239
240 if (addr < (unsigned long)__irqentry_text_start ||
241 addr >= (unsigned long)__irqentry_text_end)
242 return TRACE_TYPE_UNHANDLED;
243
244 if (type == TRACE_GRAPH_ENT) {
245 ret = trace_seq_printf(s, "==========> | ");
246 } else {
247 /* Cpu */
248 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
249 ret = print_graph_cpu(s, cpu);
250 if (ret == TRACE_TYPE_PARTIAL_LINE)
251 return TRACE_TYPE_PARTIAL_LINE;
252 }
253 /* Proc */
254 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
255 ret = print_graph_proc(s, pid);
256 if (ret == TRACE_TYPE_PARTIAL_LINE)
257 return TRACE_TYPE_PARTIAL_LINE;
258
259 ret = trace_seq_printf(s, " | ");
260 if (!ret)
261 return TRACE_TYPE_PARTIAL_LINE;
262 }
263
264 /* No overhead */
265 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
266 ret = trace_seq_printf(s, " ");
267 if (!ret)
268 return TRACE_TYPE_PARTIAL_LINE;
269 }
270
271 ret = trace_seq_printf(s, "<========== |\n");
272 }
273 if (!ret)
274 return TRACE_TYPE_PARTIAL_LINE;
275 return TRACE_TYPE_HANDLED;
276}
234 277
235static enum print_line_t 278static enum print_line_t
236print_graph_duration(unsigned long long duration, struct trace_seq *s) 279print_graph_duration(unsigned long long duration, struct trace_seq *s)
@@ -344,7 +387,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
344 387
345static enum print_line_t 388static enum print_line_t
346print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, 389print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
347 struct trace_seq *s) 390 struct trace_seq *s, pid_t pid, int cpu)
348{ 391{
349 int i; 392 int i;
350 int ret; 393 int ret;
@@ -357,8 +400,18 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
357 return TRACE_TYPE_PARTIAL_LINE; 400 return TRACE_TYPE_PARTIAL_LINE;
358 } 401 }
359 402
360 /* No time */ 403 /* Interrupt */
361 ret = trace_seq_printf(s, " | "); 404 ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
405 if (ret == TRACE_TYPE_UNHANDLED) {
406 /* No time */
407 ret = trace_seq_printf(s, " | ");
408 if (!ret)
409 return TRACE_TYPE_PARTIAL_LINE;
410 } else {
411 if (ret == TRACE_TYPE_PARTIAL_LINE)
412 return TRACE_TYPE_PARTIAL_LINE;
413 }
414
362 415
363 /* Function */ 416 /* Function */
364 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 417 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -410,7 +463,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
410 if (trace_branch_is_leaf(iter, field)) 463 if (trace_branch_is_leaf(iter, field))
411 return print_graph_entry_leaf(iter, field, s); 464 return print_graph_entry_leaf(iter, field, s);
412 else 465 else
413 return print_graph_entry_nested(field, s); 466 return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
414 467
415} 468}
416 469
@@ -474,6 +527,11 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
474 if (!ret) 527 if (!ret)
475 return TRACE_TYPE_PARTIAL_LINE; 528 return TRACE_TYPE_PARTIAL_LINE;
476 } 529 }
530
531 ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
532 if (ret == TRACE_TYPE_PARTIAL_LINE)
533 return TRACE_TYPE_PARTIAL_LINE;
534
477 return TRACE_TYPE_HANDLED; 535 return TRACE_TYPE_HANDLED;
478} 536}
479 537
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
new file mode 100644
index 000000000000..649df22d435f
--- /dev/null
+++ b/kernel/trace/trace_hw_branches.c
@@ -0,0 +1,195 @@
1/*
2 * h/w branch tracer for x86 based on bts
3 *
4 * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12#include <linux/kallsyms.h>
13
14#include <asm/ds.h>
15
16#include "trace.h"
17
18
19#define SIZEOF_BTS (1 << 13)
20
21static DEFINE_PER_CPU(struct bts_tracer *, tracer);
22static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
23
24#define this_tracer per_cpu(tracer, smp_processor_id())
25#define this_buffer per_cpu(buffer, smp_processor_id())
26
27
28static void bts_trace_start_cpu(void *arg)
29{
30 if (this_tracer)
31 ds_release_bts(this_tracer);
32
33 this_tracer =
34 ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
35 /* ovfl = */ NULL, /* th = */ (size_t)-1,
36 BTS_KERNEL);
37 if (IS_ERR(this_tracer)) {
38 this_tracer = NULL;
39 return;
40 }
41}
42
43static void bts_trace_start(struct trace_array *tr)
44{
45 int cpu;
46
47 tracing_reset_online_cpus(tr);
48
49 for_each_cpu(cpu, cpu_possible_mask)
50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
51}
52
53static void bts_trace_stop_cpu(void *arg)
54{
55 if (this_tracer) {
56 ds_release_bts(this_tracer);
57 this_tracer = NULL;
58 }
59}
60
61static void bts_trace_stop(struct trace_array *tr)
62{
63 int cpu;
64
65 for_each_cpu(cpu, cpu_possible_mask)
66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
67}
68
69static int bts_trace_init(struct trace_array *tr)
70{
71 tracing_reset_online_cpus(tr);
72 bts_trace_start(tr);
73
74 return 0;
75}
76
77static void bts_trace_print_header(struct seq_file *m)
78{
79 seq_puts(m,
80 "# CPU#        FROM                   TO         FUNCTION\n");
81 seq_puts(m,
82 "#  |           |                     |             |\n");
83}
84
85static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
86{
87 struct trace_entry *entry = iter->ent;
88 struct trace_seq *seq = &iter->seq;
89 struct hw_branch_entry *it;
90
91 trace_assign_type(it, entry);
92
93 if (entry->type == TRACE_HW_BRANCHES) {
94 if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
95 trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
96 it->from, it->to) &&
97 (!it->from ||
98 seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
99 trace_seq_printf(seq, "\n"))
100 return TRACE_TYPE_HANDLED;
101 return TRACE_TYPE_PARTIAL_LINE;
102 }
103 return TRACE_TYPE_UNHANDLED;
104}
105
106void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
107{
108 struct ring_buffer_event *event;
109 struct hw_branch_entry *entry;
110 unsigned long irq;
111
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
113 if (!event)
114 return;
115 entry = ring_buffer_event_data(event);
116 tracing_generic_entry_update(&entry->ent, 0, from);
117 entry->ent.type = TRACE_HW_BRANCHES;
118 entry->ent.cpu = smp_processor_id();
119 entry->from = from;
120 entry->to = to;
121 ring_buffer_unlock_commit(tr->buffer, event, irq);
122}
123
124static void trace_bts_at(struct trace_array *tr,
125 const struct bts_trace *trace, void *at)
126{
127 struct bts_struct bts;
128 int err = 0;
129
130 WARN_ON_ONCE(!trace->read);
131 if (!trace->read)
132 return;
133
134 err = trace->read(this_tracer, at, &bts);
135 if (err < 0)
136 return;
137
138 switch (bts.qualifier) {
139 case BTS_BRANCH:
140 trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
141 break;
142 }
143}
144
145static void trace_bts_cpu(void *arg)
146{
147 struct trace_array *tr = (struct trace_array *) arg;
148 const struct bts_trace *trace;
149 unsigned char *at;
150
151 if (!this_tracer)
152 return;
153
154 ds_suspend_bts(this_tracer);
155 trace = ds_read_bts(this_tracer);
156 if (!trace)
157 goto out;
158
159 for (at = trace->ds.top; (void *)at < trace->ds.end;
160 at += trace->ds.size)
161 trace_bts_at(tr, trace, at);
162
163 for (at = trace->ds.begin; (void *)at < trace->ds.top;
164 at += trace->ds.size)
165 trace_bts_at(tr, trace, at);
166
167out:
168 ds_resume_bts(this_tracer);
169}
170
171static void trace_bts_prepare(struct trace_iterator *iter)
172{
173 int cpu;
174
175 for_each_cpu(cpu, cpu_possible_mask)
176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
177}
178
179struct tracer bts_tracer __read_mostly =
180{
181 .name = "hw-branch-tracer",
182 .init = bts_trace_init,
183 .reset = bts_trace_stop,
184 .print_header = bts_trace_print_header,
185 .print_line = bts_trace_print_line,
186 .start = bts_trace_start,
187 .stop = bts_trace_stop,
188 .open = trace_bts_prepare
189};
190
191__init static int init_bts_trace(void)
192{
193 return register_tracer(&bts_tracer);
194}
195device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2fb6da6523b3..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;
22 22
23static void mmio_reset_data(struct trace_array *tr) 23static void mmio_reset_data(struct trace_array *tr)
24{ 24{
25 int cpu;
26
27 overrun_detected = false; 25 overrun_detected = false;
28 prev_overruns = 0; 26 prev_overruns = 0;
29 tr->time_start = ftrace_now(tr->cpu);
30 27
31 for_each_online_cpu(cpu) 28 tracing_reset_online_cpus(tr);
32 tracing_reset(tr, cpu);
33} 29}
34 30
35static int mmio_trace_init(struct trace_array *tr) 31static int mmio_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index a7172a352f62..7bda248daf55 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr)
39 39
40 trace_power_enabled = 1; 40 trace_power_enabled = 1;
41 41
42 for_each_cpu_mask(cpu, cpu_possible_map) 42 for_each_cpu(cpu, cpu_possible_mask)
43 tracing_reset(tr, cpu); 43 tracing_reset(tr, cpu);
44 return 0; 44 return 0;
45} 45}
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 863390557b44..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
49} 49}
50 50
51static void 51static void
52probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) 52probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
53{ 53{
54 struct trace_array_cpu *data; 54 struct trace_array_cpu *data;
55 unsigned long flags; 55 unsigned long flags;
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
72 local_irq_restore(flags); 72 local_irq_restore(flags);
73} 73}
74 74
75static void sched_switch_reset(struct trace_array *tr)
76{
77 int cpu;
78
79 tr->time_start = ftrace_now(tr->cpu);
80
81 for_each_online_cpu(cpu)
82 tracing_reset(tr, cpu);
83}
84
85static int tracing_sched_register(void) 75static int tracing_sched_register(void)
86{ 76{
87 int ret; 77 int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
197 187
198static void start_sched_trace(struct trace_array *tr) 188static void start_sched_trace(struct trace_array *tr)
199{ 189{
200 sched_switch_reset(tr); 190 tracing_reset_online_cpus(tr);
201 tracing_start_sched_switch_record(); 191 tracing_start_sched_switch_record();
202} 192}
203 193
@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)
221 211
222static void sched_switch_trace_start(struct trace_array *tr) 212static void sched_switch_trace_start(struct trace_array *tr)
223{ 213{
224 sched_switch_reset(tr); 214 tracing_reset_online_cpus(tr);
225 tracing_start_sched_switch(); 215 tracing_start_sched_switch();
226} 216}
227 217
@@ -247,3 +237,4 @@ __init static int init_sched_switch_trace(void)
247 return register_tracer(&sched_switch_trace); 237 return register_tracer(&sched_switch_trace);
248} 238}
249device_initcall(init_sched_switch_trace); 239device_initcall(init_sched_switch_trace);
240
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0067b49746c1..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
211} 211}
212 212
213static void 213static void
214probe_wakeup(struct rq *rq, struct task_struct *p) 214probe_wakeup(struct rq *rq, struct task_struct *p, int success)
215{ 215{
216 int cpu = smp_processor_id(); 216 int cpu = smp_processor_id();
217 unsigned long flags; 217 unsigned long flags;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0b863f2cbc8e..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
10#include <linux/debugfs.h> 10#include <linux/debugfs.h>
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/sysctl.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
15#include "trace.h" 16#include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
31 32
32static int stack_trace_disabled __read_mostly; 33static int stack_trace_disabled __read_mostly;
33static DEFINE_PER_CPU(int, trace_active); 34static DEFINE_PER_CPU(int, trace_active);
35static DEFINE_MUTEX(stack_sysctl_mutex);
36
37int stack_tracer_enabled;
38static int last_stack_tracer_enabled;
34 39
35static inline void check_stack(void) 40static inline void check_stack(void)
36{ 41{
@@ -174,7 +179,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
174 return count; 179 return count;
175} 180}
176 181
177static struct file_operations stack_max_size_fops = { 182static const struct file_operations stack_max_size_fops = {
178 .open = tracing_open_generic, 183 .open = tracing_open_generic,
179 .read = stack_max_size_read, 184 .read = stack_max_size_read,
180 .write = stack_max_size_write, 185 .write = stack_max_size_write,
@@ -272,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
272 return 0; 277 return 0;
273} 278}
274 279
275static struct seq_operations stack_trace_seq_ops = { 280static const struct seq_operations stack_trace_seq_ops = {
276 .start = t_start, 281 .start = t_start,
277 .next = t_next, 282 .next = t_next,
278 .stop = t_stop, 283 .stop = t_stop,
@@ -288,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file)
288 return ret; 293 return ret;
289} 294}
290 295
291static struct file_operations stack_trace_fops = { 296static const struct file_operations stack_trace_fops = {
292 .open = stack_trace_open, 297 .open = stack_trace_open,
293 .read = seq_read, 298 .read = seq_read,
294 .llseek = seq_lseek, 299 .llseek = seq_lseek,
295}; 300};
296 301
302int
303stack_trace_sysctl(struct ctl_table *table, int write,
304 struct file *file, void __user *buffer, size_t *lenp,
305 loff_t *ppos)
306{
307 int ret;
308
309 mutex_lock(&stack_sysctl_mutex);
310
311 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
312
313 if (ret || !write ||
314 (last_stack_tracer_enabled == stack_tracer_enabled))
315 goto out;
316
317 last_stack_tracer_enabled = stack_tracer_enabled;
318
319 if (stack_tracer_enabled)
320 register_ftrace_function(&trace_ops);
321 else
322 unregister_ftrace_function(&trace_ops);
323
324 out:
325 mutex_unlock(&stack_sysctl_mutex);
326 return ret;
327}
328
329static __init int enable_stacktrace(char *str)
330{
331 stack_tracer_enabled = 1;
332 last_stack_tracer_enabled = 1;
333 return 1;
334}
335__setup("stacktrace", enable_stacktrace);
336
297static __init int stack_trace_init(void) 337static __init int stack_trace_init(void)
298{ 338{
299 struct dentry *d_tracer; 339 struct dentry *d_tracer;
@@ -311,7 +351,8 @@ static __init int stack_trace_init(void)
311 if (!entry) 351 if (!entry)
312 pr_warning("Could not create debugfs 'stack_trace' entry\n"); 352 pr_warning("Could not create debugfs 'stack_trace' entry\n");
313 353
314 register_ftrace_function(&trace_ops); 354 if (stack_tracer_enabled)
355 register_ftrace_function(&trace_ops);
315 356
316 return 0; 357 return 0;
317} 358}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 54960edb96d0..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,27 +196,19 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
196 return HRTIMER_RESTART; 196 return HRTIMER_RESTART;
197} 197}
198 198
199static void start_stack_timer(int cpu) 199static void start_stack_timer(void *unused)
200{ 200{
201 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); 201 struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
202 202
203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 hrtimer->function = stack_trace_timer_fn; 204 hrtimer->function = stack_trace_timer_fn;
205 hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
206 205
207 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); 206 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
208} 207}
209 208
210static void start_stack_timers(void) 209static void start_stack_timers(void)
211{ 210{
212 cpumask_t saved_mask = current->cpus_allowed; 211 on_each_cpu(start_stack_timer, NULL, 1);
213 int cpu;
214
215 for_each_online_cpu(cpu) {
216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
217 start_stack_timer(cpu);
218 }
219 set_cpus_allowed_ptr(current, &saved_mask);
220} 212}
221 213
222static void stop_stack_timer(int cpu) 214static void stop_stack_timer(int cpu)
@@ -234,20 +226,10 @@ static void stop_stack_timers(void)
234 stop_stack_timer(cpu); 226 stop_stack_timer(cpu);
235} 227}
236 228
237static void stack_reset(struct trace_array *tr)
238{
239 int cpu;
240
241 tr->time_start = ftrace_now(tr->cpu);
242
243 for_each_online_cpu(cpu)
244 tracing_reset(tr, cpu);
245}
246
247static void start_stack_trace(struct trace_array *tr) 229static void start_stack_trace(struct trace_array *tr)
248{ 230{
249 mutex_lock(&sample_timer_lock); 231 mutex_lock(&sample_timer_lock);
250 stack_reset(tr); 232 tracing_reset_online_cpus(tr);
251 start_stack_timers(); 233 start_stack_timers();
252 tracer_enabled = 1; 234 tracer_enabled = 1;
253 mutex_unlock(&sample_timer_lock); 235 mutex_unlock(&sample_timer_lock);