author    Ingo Molnar <mingo@elte.hu>  2009-01-12 05:32:03 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-01-12 05:32:03 -0500
commit    e3ee1e123183ca9847e74b7b8e2694c9e3b817a6 (patch)
tree      652a84674ed05eaa46a813de2223af0bd0168a5a /kernel/trace
parent    5762ba1873b0bb9faa631aaa02f533c2b9837f82 (diff)
parent    c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into timers/hrtimers

Conflicts:
	kernel/time/tick-common.c
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ring_buffer.c             86
-rw-r--r--  kernel/trace/trace.c                   73
-rw-r--r--  kernel/trace/trace.h                    2
-rw-r--r--  kernel/trace/trace_boot.c               2
-rw-r--r--  kernel/trace/trace_functions_graph.c    2
-rw-r--r--  kernel/trace/trace_hw_branches.c        6
-rw-r--r--  kernel/trace/trace_power.c              2
-rw-r--r--  kernel/trace/trace_sysprof.c           13
8 files changed, 122 insertions(+), 64 deletions(-)
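
Most of the kernel/trace changes pulled in by this merge come from the v2.6.29-rc1
cpumask rework: fixed-size cpumask_t values become cpumask_var_t, which must be
explicitly allocated and freed, and the old cpu_isset()/cpu_set()/for_each_cpu_mask()
helpers give way to cpumask_test_cpu()/cpumask_set_cpu()/for_each_cpu(). A minimal
sketch of the pattern the hunks below adopt (illustrative only; the function is
hypothetical, not part of this commit):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static int example_cpumask_usage(void)
{
	cpumask_var_t mask;

	/* With CONFIG_CPUMASK_OFFSTACK=y this allocates from the heap;
	 * otherwise it is a cheap wrapper around an on-stack mask. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_possible_mask);	/* was: mask = cpu_possible_map */

	if (cpumask_test_cpu(0, mask))		/* was: cpu_isset(0, mask) */
		pr_info("CPU 0 is possible\n");

	free_cpumask_var(mask);
	return 0;
}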
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 76f34c0ef29c..8b0daf0662ef 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
 {
 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
 {
 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -111,12 +113,14 @@ u64 ring_buffer_time_stamp(int cpu)
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -164,8 +168,15 @@ rb_event_length(struct ring_buffer_event *event)
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -187,9 +198,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -261,7 +273,7 @@ struct ring_buffer {
 	unsigned			pages;
 	unsigned			flags;
 	int				cpus;
-	cpumask_t			cpumask;
+	cpumask_var_t			cpumask;
 	atomic_t			record_disabled;
 
 	struct mutex			mutex;
@@ -427,7 +439,7 @@ extern int ring_buffer_page_too_big(void);
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -452,6 +464,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 
@@ -459,14 +474,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -486,10 +501,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -503,8 +522,11 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
@@ -680,6 +702,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1274,7 +1297,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1304,6 +1327,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1350,6 +1374,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1385,7 +1410,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1411,6 +1436,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1437,6 +1463,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1449,6 +1476,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1464,12 +1492,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1483,12 +1512,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1499,12 +1529,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1515,12 +1546,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1543,6 +1575,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1565,6 +1598,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
@@ -1600,6 +1634,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1614,6 +1649,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1828,7 +1864,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1880,6 +1916,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1940,6 +1977,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_peek - peek at the next event to be read
@@ -2001,7 +2039,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2017,6 +2055,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -2037,7 +2076,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2059,6 +2098,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2075,6 +2115,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2101,6 +2142,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2110,6 +2152,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2143,7 +2186,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2156,6 +2199,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2168,6 +2212,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2186,6 +2231,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2196,12 +2242,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2219,8 +2266,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */
@@ -2250,6 +2297,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 			      struct buffer_data_page *bpage)
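
The other systematic change to ring_buffer.c above is the EXPORT_SYMBOL_GPL()
annotation added after nearly every public entry point, which makes the ring-buffer
API callable from GPL-compatible modules. The idiom in isolation (the helper below
is hypothetical, not from this file):

#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
/* The symbol becomes resolvable at module load time,
 * but only for modules with a GPL-compatible license. */
EXPORT_SYMBOL_GPL(example_helper);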
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f4bb3800318b..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/kprobes.h>
-#include <linux/seq_file.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
@@ -90,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_t __read_mostly		tracing_buffer_mask;
+static cpumask_var_t __read_mostly	tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -1310,7 +1309,7 @@ enum trace_file_type {
 	TRACE_FILE_ANNOTATE	= 2,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
@@ -1389,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
 	if (iter->ent)
-		trace_iterator_increment(iter, iter->cpu);
+		trace_iterator_increment(iter);
 
 	return iter->ent ? iter : NULL;
 }
@@ -1812,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 		return;
 
-	if (cpu_isset(iter->cpu, iter->started))
+	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
-	cpu_set(iter->cpu, iter->started);
+	cpumask_set_cpu(iter->cpu, iter->started);
 	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
@@ -2647,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2694,6 +2687,10 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
 
 	mutex_lock(&tracing_cpumask_update_lock);
 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -2707,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&global_trace.data[cpu]->disabled);
 		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
 err_unlock:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask);
 
 	return err;
 }
@@ -3115,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (!iter)
 		return -ENOMEM;
 
+	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+		kfree(iter);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&trace_types_lock);
 
 	/* trace pipe does not show start of buffer */
-	cpus_setall(iter->started);
+	cpumask_setall(iter->started);
 
 	iter->tr = &global_trace;
 	iter->trace = current_trace;
@@ -3135,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
 	struct trace_iterator *iter = file->private_data;
 
+	free_cpumask_var(iter->started);
 	kfree(iter);
 	atomic_dec(&tracing_reader);
 
@@ -3753,7 +3758,6 @@ void ftrace_dump(void)
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static cpumask_t mask;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -3787,8 +3791,6 @@ void ftrace_dump(void)
 	 * and then release the locks again.
 	 */
 
-	cpus_clear(mask);
-
 	while (!trace_empty(&iter)) {
 
 		if (!cnt)
@@ -3824,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
 	int i;
+	int ret = -ENOMEM;
 
-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
+
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
+	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
 						   TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
 					      TRACE_BUFFER_FLAGS);
@@ -3844,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
 	}
 	max_tr.entries = ring_buffer_size(max_tr.buffer);
 	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3874,8 +3885,14 @@ __init static int tracer_alloc_buffers(void)
 					       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
+	ret = 0;
 
-	return 0;
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
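
tracer_alloc_buffers() above now reports failure through a goto ladder, each label
unwinding the allocations made before the failure point (note that in this commit
the success path sets ret = 0 and falls through the cleanup labels rather than
returning early). A condensed sketch of the usual form of the idiom, with
hypothetical names and sizes:

#include <linux/slab.h>

static void *first, *second;

static int example_init(void)
{
	int ret = -ENOMEM;

	first = kmalloc(64, GFP_KERNEL);
	if (!first)
		goto out;

	second = kmalloc(64, GFP_KERNEL);
	if (!second)
		goto out_free_first;

	return 0;		/* success: both allocations stay live */

out_free_first:
	kfree(first);
out:
	return ret;
}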
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cc7a4f864036..4d3d381bfd95 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -368,7 +368,7 @@ struct trace_iterator {
 	loff_t			pos;
 	long			idx;
 
-	cpumask_t		started;
+	cpumask_var_t		started;
 };
 
 int tracing_is_enabled(void);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 3ccebde28482..366c8c333e13 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -42,7 +42,7 @@ static int boot_trace_init(struct trace_array *tr)
 	int cpu;
 	boot_trace = tr;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		tracing_reset(tr, cpu);
 
 	tracing_sched_switch_assign_trace(tr);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4bf39fcae97a..930c08e5b38e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
 	int i;
 	int ret;
 	int log10_this = log10_cpu(cpu);
-	int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
 
 
 	/*
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index b6a3e20a49a9..649df22d435f 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -46,7 +46,7 @@ static void bts_trace_start(struct trace_array *tr)
 
 	tracing_reset_online_cpus(tr);
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
 }
 
@@ -62,7 +62,7 @@ static void bts_trace_stop(struct trace_array *tr)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
 }
 
@@ -172,7 +172,7 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
 }
 
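
The BTS hunks above only swap the iterator macro; each callback still runs
synchronously on one target CPU at a time. The shape of that fan-out, as a sketch
with hypothetical names:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void poke_cpu(void *info)
{
	/* executes on the target CPU */
}

static void poke_all_possible(void)
{
	int cpu;

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, poke_cpu, NULL, 1); /* 1 = wait */
}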
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index a7172a352f62..7bda248daf55 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr)
 
 	trace_power_enabled = 1;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		tracing_reset(tr, cpu);
 	return 0;
 }
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index a5779bd975db..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,9 +196,9 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
 	return HRTIMER_RESTART;
 }
 
-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
 {
-	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
 
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = stack_trace_timer_fn;
@@ -208,14 +208,7 @@ static void start_stack_timer(int cpu)
 
 static void start_stack_timers(void)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		start_stack_timer(cpu);
-	}
-	set_cpus_allowed_ptr(current, &saved_mask);
+	on_each_cpu(start_stack_timer, NULL, 1);
 }
 
 static void stop_stack_timer(int cpu)
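
The sysprof change is the one behavioral simplification in this set: instead of
temporarily rebinding the current task to each online CPU so the per-CPU hrtimer
is started locally, start_stack_timers() now lets on_each_cpu() run the callback
everywhere (locally on the calling CPU, by IPI on the others; the trailing 1 waits
for completion). A minimal sketch of the replacement pattern, with hypothetical
names:

#include <linux/smp.h>

static void setup_on_this_cpu(void *unused)
{
	/* Runs on each online CPU, so per-CPU state can be reached
	 * with __get_cpu_var() instead of per_cpu(..., cpu). */
}

static void setup_all_cpus(void)
{
	on_each_cpu(setup_on_this_cpu, NULL, 1);
}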