about summary refs log tree commit diff stats
path: root/kernel/trace/ring_buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  50
1 files changed, 32 insertions, 18 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d601a7c4587..8b0daf0662ef 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event)
168 */ 168 */
169unsigned ring_buffer_event_length(struct ring_buffer_event *event) 169unsigned ring_buffer_event_length(struct ring_buffer_event *event)
170{ 170{
171 return rb_event_length(event); 171 unsigned length = rb_event_length(event);
172 if (event->type != RINGBUF_TYPE_DATA)
173 return length;
174 length -= RB_EVNT_HDR_SIZE;
175 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
176 length -= sizeof(event->array[0]);
177 return length;
172} 178}
173EXPORT_SYMBOL_GPL(ring_buffer_event_length); 179EXPORT_SYMBOL_GPL(ring_buffer_event_length);
174 180
@@ -195,7 +201,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
195EXPORT_SYMBOL_GPL(ring_buffer_event_data); 201EXPORT_SYMBOL_GPL(ring_buffer_event_data);
196 202
197#define for_each_buffer_cpu(buffer, cpu) \ 203#define for_each_buffer_cpu(buffer, cpu) \
198 for_each_cpu_mask(cpu, buffer->cpumask) 204 for_each_cpu(cpu, buffer->cpumask)
199 205
200#define TS_SHIFT 27 206#define TS_SHIFT 27
201#define TS_MASK ((1ULL << TS_SHIFT) - 1) 207#define TS_MASK ((1ULL << TS_SHIFT) - 1)
@@ -267,7 +273,7 @@ struct ring_buffer {
267 unsigned pages; 273 unsigned pages;
268 unsigned flags; 274 unsigned flags;
269 int cpus; 275 int cpus;
270 cpumask_t cpumask; 276 cpumask_var_t cpumask;
271 atomic_t record_disabled; 277 atomic_t record_disabled;
272 278
273 struct mutex mutex; 279 struct mutex mutex;
@@ -458,6 +464,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
458 if (!buffer) 464 if (!buffer)
459 return NULL; 465 return NULL;
460 466
467 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
468 goto fail_free_buffer;
469
461 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 470 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
462 buffer->flags = flags; 471 buffer->flags = flags;
463 472
@@ -465,14 +474,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
465 if (buffer->pages == 1) 474 if (buffer->pages == 1)
466 buffer->pages++; 475 buffer->pages++;
467 476
468 buffer->cpumask = cpu_possible_map; 477 cpumask_copy(buffer->cpumask, cpu_possible_mask);
469 buffer->cpus = nr_cpu_ids; 478 buffer->cpus = nr_cpu_ids;
470 479
471 bsize = sizeof(void *) * nr_cpu_ids; 480 bsize = sizeof(void *) * nr_cpu_ids;
472 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 481 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
473 GFP_KERNEL); 482 GFP_KERNEL);
474 if (!buffer->buffers) 483 if (!buffer->buffers)
475 goto fail_free_buffer; 484 goto fail_free_cpumask;
476 485
477 for_each_buffer_cpu(buffer, cpu) { 486 for_each_buffer_cpu(buffer, cpu) {
478 buffer->buffers[cpu] = 487 buffer->buffers[cpu] =
@@ -492,6 +501,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
492 } 501 }
493 kfree(buffer->buffers); 502 kfree(buffer->buffers);
494 503
504 fail_free_cpumask:
505 free_cpumask_var(buffer->cpumask);
506
495 fail_free_buffer: 507 fail_free_buffer:
496 kfree(buffer); 508 kfree(buffer);
497 return NULL; 509 return NULL;
@@ -510,6 +522,8 @@ ring_buffer_free(struct ring_buffer *buffer)
510 for_each_buffer_cpu(buffer, cpu) 522 for_each_buffer_cpu(buffer, cpu)
511 rb_free_cpu_buffer(buffer->buffers[cpu]); 523 rb_free_cpu_buffer(buffer->buffers[cpu]);
512 524
525 free_cpumask_var(buffer->cpumask);
526
513 kfree(buffer); 527 kfree(buffer);
514} 528}
515EXPORT_SYMBOL_GPL(ring_buffer_free); 529EXPORT_SYMBOL_GPL(ring_buffer_free);
@@ -1283,7 +1297,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1283 1297
1284 cpu = raw_smp_processor_id(); 1298 cpu = raw_smp_processor_id();
1285 1299
1286 if (!cpu_isset(cpu, buffer->cpumask)) 1300 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1287 goto out; 1301 goto out;
1288 1302
1289 cpu_buffer = buffer->buffers[cpu]; 1303 cpu_buffer = buffer->buffers[cpu];
@@ -1396,7 +1410,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1396 1410
1397 cpu = raw_smp_processor_id(); 1411 cpu = raw_smp_processor_id();
1398 1412
1399 if (!cpu_isset(cpu, buffer->cpumask)) 1413 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1400 goto out; 1414 goto out;
1401 1415
1402 cpu_buffer = buffer->buffers[cpu]; 1416 cpu_buffer = buffer->buffers[cpu];
@@ -1478,7 +1492,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1478{ 1492{
1479 struct ring_buffer_per_cpu *cpu_buffer; 1493 struct ring_buffer_per_cpu *cpu_buffer;
1480 1494
1481 if (!cpu_isset(cpu, buffer->cpumask)) 1495 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1482 return; 1496 return;
1483 1497
1484 cpu_buffer = buffer->buffers[cpu]; 1498 cpu_buffer = buffer->buffers[cpu];
@@ -1498,7 +1512,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1498{ 1512{
1499 struct ring_buffer_per_cpu *cpu_buffer; 1513 struct ring_buffer_per_cpu *cpu_buffer;
1500 1514
1501 if (!cpu_isset(cpu, buffer->cpumask)) 1515 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1502 return; 1516 return;
1503 1517
1504 cpu_buffer = buffer->buffers[cpu]; 1518 cpu_buffer = buffer->buffers[cpu];
@@ -1515,7 +1529,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1515{ 1529{
1516 struct ring_buffer_per_cpu *cpu_buffer; 1530 struct ring_buffer_per_cpu *cpu_buffer;
1517 1531
1518 if (!cpu_isset(cpu, buffer->cpumask)) 1532 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1519 return 0; 1533 return 0;
1520 1534
1521 cpu_buffer = buffer->buffers[cpu]; 1535 cpu_buffer = buffer->buffers[cpu];
@@ -1532,7 +1546,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1532{ 1546{
1533 struct ring_buffer_per_cpu *cpu_buffer; 1547 struct ring_buffer_per_cpu *cpu_buffer;
1534 1548
1535 if (!cpu_isset(cpu, buffer->cpumask)) 1549 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1536 return 0; 1550 return 0;
1537 1551
1538 cpu_buffer = buffer->buffers[cpu]; 1552 cpu_buffer = buffer->buffers[cpu];
@@ -1850,7 +1864,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1850 struct buffer_page *reader; 1864 struct buffer_page *reader;
1851 int nr_loops = 0; 1865 int nr_loops = 0;
1852 1866
1853 if (!cpu_isset(cpu, buffer->cpumask)) 1867 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1854 return NULL; 1868 return NULL;
1855 1869
1856 cpu_buffer = buffer->buffers[cpu]; 1870 cpu_buffer = buffer->buffers[cpu];
@@ -2025,7 +2039,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2025 struct ring_buffer_event *event; 2039 struct ring_buffer_event *event;
2026 unsigned long flags; 2040 unsigned long flags;
2027 2041
2028 if (!cpu_isset(cpu, buffer->cpumask)) 2042 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2029 return NULL; 2043 return NULL;
2030 2044
2031 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2045 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2062,7 +2076,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2062 struct ring_buffer_iter *iter; 2076 struct ring_buffer_iter *iter;
2063 unsigned long flags; 2077 unsigned long flags;
2064 2078
2065 if (!cpu_isset(cpu, buffer->cpumask)) 2079 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2066 return NULL; 2080 return NULL;
2067 2081
2068 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 2082 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2172,7 +2186,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2172 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2186 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2173 unsigned long flags; 2187 unsigned long flags;
2174 2188
2175 if (!cpu_isset(cpu, buffer->cpumask)) 2189 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2176 return; 2190 return;
2177 2191
2178 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2192 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2228,7 +2242,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2228{ 2242{
2229 struct ring_buffer_per_cpu *cpu_buffer; 2243 struct ring_buffer_per_cpu *cpu_buffer;
2230 2244
2231 if (!cpu_isset(cpu, buffer->cpumask)) 2245 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2232 return 1; 2246 return 1;
2233 2247
2234 cpu_buffer = buffer->buffers[cpu]; 2248 cpu_buffer = buffer->buffers[cpu];
@@ -2252,8 +2266,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2252 struct ring_buffer_per_cpu *cpu_buffer_a; 2266 struct ring_buffer_per_cpu *cpu_buffer_a;
2253 struct ring_buffer_per_cpu *cpu_buffer_b; 2267 struct ring_buffer_per_cpu *cpu_buffer_b;
2254 2268
2255 if (!cpu_isset(cpu, buffer_a->cpumask) || 2269 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2256 !cpu_isset(cpu, buffer_b->cpumask)) 2270 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2257 return -EINVAL; 2271 return -EINVAL;
2258 2272
2259 /* At least make sure the two buffers are somewhat the same */ 2273 /* At least make sure the two buffers are somewhat the same */