path: root/kernel/trace/ring_buffer.c
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  745
1 file changed, 538 insertions(+), 207 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f780e9552f91..1d601a7c4587 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -18,8 +18,46 @@
18 18
19#include "trace.h" 19#include "trace.h"
20 20
21/* Global flag to disable all recording to ring buffers */ 21/*
22static int ring_buffers_off __read_mostly; 22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
 25 * Turning this switch on makes it OK to write to the
26 * ring buffer, if the ring buffer is enabled itself.
27 *
 28 * There are three layers that must be on in order to write
29 * to the ring buffer.
30 *
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
34 *
35 * In case of an anomaly, this global flag has a bit set that
 36 * will permanently disable all ring buffers.
37 */
38
39/*
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
42 *
43 * ON DISABLED
44 * ---- ----------
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
48 */
49
50enum {
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
53};
54
55enum {
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
58};
59
60static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
23 61
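
The ON/DISABLED scheme above reduces to a small amount of bit logic. A minimal user-space sketch of it (plain bit operations stand in for the kernel's set_bit/clear_bit; none of this is kernel code):

        #include <stdio.h>

        enum {
                RB_BUFFERS_ON       = 1 << 0,
                RB_BUFFERS_DISABLED = 1 << 1,
        };

        static long flags = RB_BUFFERS_ON;

        /* mirrors the check used later in ring_buffer_lock_reserve():
         * recording requires flags == RB_BUFFERS_ON exactly, so a set
         * DISABLED bit can never be masked by turning ON back on */
        static int recording_allowed(void)
        {
                return flags == RB_BUFFERS_ON;
        }

        int main(void)
        {
                printf("%d\n", recording_allowed());    /* 1 */
                flags &= ~RB_BUFFERS_ON;                /* tracing_off() */
                printf("%d\n", recording_allowed());    /* 0 */
                flags |= RB_BUFFERS_ON;                 /* tracing_on() */
                flags |= RB_BUFFERS_DISABLED;           /* tracing_off_permanent() */
                printf("%d\n", recording_allowed());    /* 0, and stays 0 */
                return 0;
        }
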
24/** 62/**
25 * tracing_on - enable all tracing buffers 63 * tracing_on - enable all tracing buffers
@@ -29,8 +67,9 @@ static int ring_buffers_off __read_mostly;
29 */ 67 */
30void tracing_on(void) 68void tracing_on(void)
31{ 69{
32 ring_buffers_off = 0; 70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
33} 71}
72EXPORT_SYMBOL_GPL(tracing_on);
34 73
35/** 74/**
36 * tracing_off - turn off all tracing buffers 75 * tracing_off - turn off all tracing buffers
@@ -42,8 +81,22 @@ void tracing_on(void)
42 */ 81 */
43void tracing_off(void) 82void tracing_off(void)
44{ 83{
45 ring_buffers_off = 1; 84 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
46} 85}
86EXPORT_SYMBOL_GPL(tracing_off);
87
88/**
89 * tracing_off_permanent - permanently disable ring buffers
90 *
91 * This function, once called, will disable all ring buffers
 92 * permanently.
93 */
94void tracing_off_permanent(void)
95{
96 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
97}
98
99#include "trace.h"
47 100
48/* Up this if you want to test the TIME_EXTENTS and normalization */ 101/* Up this if you want to test the TIME_EXTENTS and normalization */
49#define DEBUG_SHIFT 0 102#define DEBUG_SHIFT 0
@@ -56,16 +109,18 @@ u64 ring_buffer_time_stamp(int cpu)
56 preempt_disable_notrace(); 109 preempt_disable_notrace();
57 /* shift to debug/test normalization and TIME_EXTENTS */ 110 /* shift to debug/test normalization and TIME_EXTENTS */
58 time = sched_clock() << DEBUG_SHIFT; 111 time = sched_clock() << DEBUG_SHIFT;
59 preempt_enable_notrace(); 112 preempt_enable_no_resched_notrace();
60 113
61 return time; 114 return time;
62} 115}
116EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
63 117
64void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) 118void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
65{ 119{
66 /* Just stupid testing the normalize function and deltas */ 120 /* Just stupid testing the normalize function and deltas */
67 *ts >>= DEBUG_SHIFT; 121 *ts >>= DEBUG_SHIFT;
68} 122}
123EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
69 124
70#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) 125#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
71#define RB_ALIGNMENT_SHIFT 2 126#define RB_ALIGNMENT_SHIFT 2
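
The DEBUG_SHIFT knob above is easier to see outside the kernel: shifting the clock left inflates every timestamp delta by 2^DEBUG_SHIFT, which exercises the TIME_EXTENTS handling, and normalization simply shifts back. A sketch with a hypothetical non-zero shift (the file ships with 0):

        #include <stdint.h>
        #include <assert.h>

        #define DEBUG_SHIFT 4   /* hypothetical test value */

        static uint64_t time_stamp(uint64_t sched_clock_ns)
        {
                return sched_clock_ns << DEBUG_SHIFT;   /* inflate deltas */
        }

        static void normalize(uint64_t *ts)
        {
                *ts >>= DEBUG_SHIFT;                    /* undo the inflation */
        }

        int main(void)
        {
                uint64_t ts = time_stamp(123456789);
                normalize(&ts);
                assert(ts == 123456789);  /* round-trips while the top bits are free */
                return 0;
        }
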
@@ -115,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
115{ 170{
116 return rb_event_length(event); 171 return rb_event_length(event);
117} 172}
173EXPORT_SYMBOL_GPL(ring_buffer_event_length);
118 174
119/* inline for ring buffer fast paths */ 175/* inline for ring buffer fast paths */
120static inline void * 176static inline void *
@@ -136,6 +192,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
136{ 192{
137 return rb_event_data(event); 193 return rb_event_data(event);
138} 194}
195EXPORT_SYMBOL_GPL(ring_buffer_event_data);
139 196
140#define for_each_buffer_cpu(buffer, cpu) \ 197#define for_each_buffer_cpu(buffer, cpu) \
141 for_each_cpu_mask(cpu, buffer->cpumask) 198 for_each_cpu_mask(cpu, buffer->cpumask)
@@ -144,20 +201,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
144#define TS_MASK ((1ULL << TS_SHIFT) - 1) 201#define TS_MASK ((1ULL << TS_SHIFT) - 1)
145#define TS_DELTA_TEST (~TS_MASK) 202#define TS_DELTA_TEST (~TS_MASK)
146 203
147/* 204struct buffer_data_page {
148 * This hack stolen from mm/slob.c.
149 * We can store per page timing information in the page frame of the page.
150 * Thanks to Peter Zijlstra for suggesting this idea.
151 */
152struct buffer_page {
153 u64 time_stamp; /* page time stamp */ 205 u64 time_stamp; /* page time stamp */
154 local_t write; /* index for next write */
155 local_t commit; /* write committed index */ 206 local_t commit; /* write committed index */
207 unsigned char data[]; /* data of buffer page */
208};
209
210struct buffer_page {
211 local_t write; /* index for next write */
156 unsigned read; /* index for next read */ 212 unsigned read; /* index for next read */
157 struct list_head list; /* list of free pages */ 213 struct list_head list; /* list of free pages */
158 void *page; /* Actual data page */ 214 struct buffer_data_page *page; /* Actual data page */
159}; 215};
160 216
217static void rb_init_page(struct buffer_data_page *bpage)
218{
219 local_set(&bpage->commit, 0);
220}
221
161/* 222/*
162 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 223 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
163 * this issue out. 224 * this issue out.
@@ -179,7 +240,7 @@ static inline int test_time_stamp(u64 delta)
179 return 0; 240 return 0;
180} 241}
181 242
182#define BUF_PAGE_SIZE PAGE_SIZE 243#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
183 244
184/* 245/*
185 * head_page == tail_page && head == tail then buffer is empty. 246 * head_page == tail_page && head == tail then buffer is empty.
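
The split above moves the page's bookkeeping (time_stamp and the commit index) into the data page itself, so a full page, header included, can later be handed out wholesale by ring_buffer_read_page; the cost is that BUF_PAGE_SIZE shrinks by the header size. A sketch with simplified stand-ins for u64 and local_t:

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        struct buffer_data_page {
                unsigned long long time_stamp;  /* page time stamp */
                long commit;                    /* committed index */
                unsigned char data[];           /* event records start here */
        };

        #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))

        int main(void)
        {
                /* on a typical LP64 build: header 16, payload 4080 */
                printf("header %zu, payload %lu\n",
                       sizeof(struct buffer_data_page), BUF_PAGE_SIZE);
                return 0;
        }
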
@@ -187,7 +248,8 @@ static inline int test_time_stamp(u64 delta)
187struct ring_buffer_per_cpu { 248struct ring_buffer_per_cpu {
188 int cpu; 249 int cpu;
189 struct ring_buffer *buffer; 250 struct ring_buffer *buffer;
190 spinlock_t lock; 251 spinlock_t reader_lock; /* serialize readers */
252 raw_spinlock_t lock;
191 struct lock_class_key lock_key; 253 struct lock_class_key lock_key;
192 struct list_head pages; 254 struct list_head pages;
193 struct buffer_page *head_page; /* read from head */ 255 struct buffer_page *head_page; /* read from head */
@@ -202,7 +264,6 @@ struct ring_buffer_per_cpu {
202}; 264};
203 265
204struct ring_buffer { 266struct ring_buffer {
205 unsigned long size;
206 unsigned pages; 267 unsigned pages;
207 unsigned flags; 268 unsigned flags;
208 int cpus; 269 int cpus;
@@ -221,32 +282,16 @@ struct ring_buffer_iter {
221 u64 read_stamp; 282 u64 read_stamp;
222}; 283};
223 284
285/* buffer may be either ring_buffer or ring_buffer_per_cpu */
224#define RB_WARN_ON(buffer, cond) \ 286#define RB_WARN_ON(buffer, cond) \
225 do { \ 287 ({ \
226 if (unlikely(cond)) { \ 288 int _____ret = unlikely(cond); \
227 atomic_inc(&buffer->record_disabled); \ 289 if (_____ret) { \
228 WARN_ON(1); \
229 } \
230 } while (0)
231
232#define RB_WARN_ON_RET(buffer, cond) \
233 do { \
234 if (unlikely(cond)) { \
235 atomic_inc(&buffer->record_disabled); \
236 WARN_ON(1); \
237 return -1; \
238 } \
239 } while (0)
240
241#define RB_WARN_ON_ONCE(buffer, cond) \
242 do { \
243 static int once; \
244 if (unlikely(cond) && !once) { \
245 once++; \
246 atomic_inc(&buffer->record_disabled); \ 290 atomic_inc(&buffer->record_disabled); \
247 WARN_ON(1); \ 291 WARN_ON(1); \
248 } \ 292 } \
249 } while (0) 293 _____ret; \
294 })
250 295
251/** 296/**
252 * check_pages - integrity check of buffer pages 297 * check_pages - integrity check of buffer pages
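
The rewrite above folds the three warn macros into one by using a GNU C statement expression, which evaluates to its last expression and can therefore be tested directly in an if (). A user-space reduction (the names are stand-ins, not the kernel's):

        #include <stdio.h>

        static int record_disabled;

        #define MY_WARN_ON(cond)                                \
                ({                                              \
                        int _____ret = !!(cond);                \
                        if (_____ret) {                         \
                                record_disabled++;              \
                                fprintf(stderr, "warning!\n");  \
                        }                                       \
                        _____ret;                               \
                })

        static int check(int broken)
        {
                /* the do-while versions could only be statements; this one
                 * yields a value, so callers can bail out on their own terms */
                if (MY_WARN_ON(broken))
                        return -1;
                return 0;
        }

        int main(void)
        {
                printf("%d %d\n", check(0), check(1));  /* 0 -1 */
                return 0;
        }
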
@@ -258,16 +303,20 @@ struct ring_buffer_iter {
258static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 303static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
259{ 304{
260 struct list_head *head = &cpu_buffer->pages; 305 struct list_head *head = &cpu_buffer->pages;
261 struct buffer_page *page, *tmp; 306 struct buffer_page *bpage, *tmp;
262 307
263 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head); 308 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
264 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head); 309 return -1;
310 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
311 return -1;
265 312
266 list_for_each_entry_safe(page, tmp, head, list) { 313 list_for_each_entry_safe(bpage, tmp, head, list) {
267 RB_WARN_ON_RET(cpu_buffer, 314 if (RB_WARN_ON(cpu_buffer,
268 page->list.next->prev != &page->list); 315 bpage->list.next->prev != &bpage->list))
269 RB_WARN_ON_RET(cpu_buffer, 316 return -1;
270 page->list.prev->next != &page->list); 317 if (RB_WARN_ON(cpu_buffer,
318 bpage->list.prev->next != &bpage->list))
319 return -1;
271 } 320 }
272 321
273 return 0; 322 return 0;
@@ -277,22 +326,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
277 unsigned nr_pages) 326 unsigned nr_pages)
278{ 327{
279 struct list_head *head = &cpu_buffer->pages; 328 struct list_head *head = &cpu_buffer->pages;
280 struct buffer_page *page, *tmp; 329 struct buffer_page *bpage, *tmp;
281 unsigned long addr; 330 unsigned long addr;
282 LIST_HEAD(pages); 331 LIST_HEAD(pages);
283 unsigned i; 332 unsigned i;
284 333
285 for (i = 0; i < nr_pages; i++) { 334 for (i = 0; i < nr_pages; i++) {
286 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), 335 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
287 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); 336 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
288 if (!page) 337 if (!bpage)
289 goto free_pages; 338 goto free_pages;
290 list_add(&page->list, &pages); 339 list_add(&bpage->list, &pages);
291 340
292 addr = __get_free_page(GFP_KERNEL); 341 addr = __get_free_page(GFP_KERNEL);
293 if (!addr) 342 if (!addr)
294 goto free_pages; 343 goto free_pages;
295 page->page = (void *)addr; 344 bpage->page = (void *)addr;
345 rb_init_page(bpage->page);
296 } 346 }
297 347
298 list_splice(&pages, head); 348 list_splice(&pages, head);
@@ -302,9 +352,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
302 return 0; 352 return 0;
303 353
304 free_pages: 354 free_pages:
305 list_for_each_entry_safe(page, tmp, &pages, list) { 355 list_for_each_entry_safe(bpage, tmp, &pages, list) {
306 list_del_init(&page->list); 356 list_del_init(&bpage->list);
307 free_buffer_page(page); 357 free_buffer_page(bpage);
308 } 358 }
309 return -ENOMEM; 359 return -ENOMEM;
310} 360}
@@ -313,7 +363,7 @@ static struct ring_buffer_per_cpu *
313rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) 363rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
314{ 364{
315 struct ring_buffer_per_cpu *cpu_buffer; 365 struct ring_buffer_per_cpu *cpu_buffer;
316 struct buffer_page *page; 366 struct buffer_page *bpage;
317 unsigned long addr; 367 unsigned long addr;
318 int ret; 368 int ret;
319 369
@@ -324,19 +374,21 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
324 374
325 cpu_buffer->cpu = cpu; 375 cpu_buffer->cpu = cpu;
326 cpu_buffer->buffer = buffer; 376 cpu_buffer->buffer = buffer;
327 spin_lock_init(&cpu_buffer->lock); 377 spin_lock_init(&cpu_buffer->reader_lock);
378 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
328 INIT_LIST_HEAD(&cpu_buffer->pages); 379 INIT_LIST_HEAD(&cpu_buffer->pages);
329 380
330 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), 381 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
331 GFP_KERNEL, cpu_to_node(cpu)); 382 GFP_KERNEL, cpu_to_node(cpu));
332 if (!page) 383 if (!bpage)
333 goto fail_free_buffer; 384 goto fail_free_buffer;
334 385
335 cpu_buffer->reader_page = page; 386 cpu_buffer->reader_page = bpage;
336 addr = __get_free_page(GFP_KERNEL); 387 addr = __get_free_page(GFP_KERNEL);
337 if (!addr) 388 if (!addr)
338 goto fail_free_reader; 389 goto fail_free_reader;
339 page->page = (void *)addr; 390 bpage->page = (void *)addr;
391 rb_init_page(bpage->page);
340 392
341 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 393 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
342 394
@@ -361,14 +413,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
361static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 413static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
362{ 414{
363 struct list_head *head = &cpu_buffer->pages; 415 struct list_head *head = &cpu_buffer->pages;
364 struct buffer_page *page, *tmp; 416 struct buffer_page *bpage, *tmp;
365 417
366 list_del_init(&cpu_buffer->reader_page->list); 418 list_del_init(&cpu_buffer->reader_page->list);
367 free_buffer_page(cpu_buffer->reader_page); 419 free_buffer_page(cpu_buffer->reader_page);
368 420
369 list_for_each_entry_safe(page, tmp, head, list) { 421 list_for_each_entry_safe(bpage, tmp, head, list) {
370 list_del_init(&page->list); 422 list_del_init(&bpage->list);
371 free_buffer_page(page); 423 free_buffer_page(bpage);
372 } 424 }
373 kfree(cpu_buffer); 425 kfree(cpu_buffer);
374} 426}
@@ -381,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
381 433
382/** 434/**
383 * ring_buffer_alloc - allocate a new ring_buffer 435 * ring_buffer_alloc - allocate a new ring_buffer
384 * @size: the size in bytes that is needed. 436 * @size: the size in bytes per cpu that is needed.
385 * @flags: attributes to set for the ring buffer. 437 * @flags: attributes to set for the ring buffer.
386 * 438 *
387 * Currently the only flag that is available is the RB_FL_OVERWRITE 439 * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +496,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
444 kfree(buffer); 496 kfree(buffer);
445 return NULL; 497 return NULL;
446} 498}
499EXPORT_SYMBOL_GPL(ring_buffer_alloc);
447 500
448/** 501/**
449 * ring_buffer_free - free a ring buffer. 502 * ring_buffer_free - free a ring buffer.
@@ -459,13 +512,14 @@ ring_buffer_free(struct ring_buffer *buffer)
459 512
460 kfree(buffer); 513 kfree(buffer);
461} 514}
515EXPORT_SYMBOL_GPL(ring_buffer_free);
462 516
463static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 517static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
464 518
465static void 519static void
466rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) 520rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
467{ 521{
468 struct buffer_page *page; 522 struct buffer_page *bpage;
469 struct list_head *p; 523 struct list_head *p;
470 unsigned i; 524 unsigned i;
471 525
@@ -473,13 +527,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
473 synchronize_sched(); 527 synchronize_sched();
474 528
475 for (i = 0; i < nr_pages; i++) { 529 for (i = 0; i < nr_pages; i++) {
476 BUG_ON(list_empty(&cpu_buffer->pages)); 530 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
531 return;
477 p = cpu_buffer->pages.next; 532 p = cpu_buffer->pages.next;
478 page = list_entry(p, struct buffer_page, list); 533 bpage = list_entry(p, struct buffer_page, list);
479 list_del_init(&page->list); 534 list_del_init(&bpage->list);
480 free_buffer_page(page); 535 free_buffer_page(bpage);
481 } 536 }
482 BUG_ON(list_empty(&cpu_buffer->pages)); 537 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
538 return;
483 539
484 rb_reset_cpu(cpu_buffer); 540 rb_reset_cpu(cpu_buffer);
485 541
@@ -493,7 +549,7 @@ static void
493rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, 549rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
494 struct list_head *pages, unsigned nr_pages) 550 struct list_head *pages, unsigned nr_pages)
495{ 551{
496 struct buffer_page *page; 552 struct buffer_page *bpage;
497 struct list_head *p; 553 struct list_head *p;
498 unsigned i; 554 unsigned i;
499 555
@@ -501,11 +557,12 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
501 synchronize_sched(); 557 synchronize_sched();
502 558
503 for (i = 0; i < nr_pages; i++) { 559 for (i = 0; i < nr_pages; i++) {
504 BUG_ON(list_empty(pages)); 560 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
561 return;
505 p = pages->next; 562 p = pages->next;
506 page = list_entry(p, struct buffer_page, list); 563 bpage = list_entry(p, struct buffer_page, list);
507 list_del_init(&page->list); 564 list_del_init(&bpage->list);
508 list_add_tail(&page->list, &cpu_buffer->pages); 565 list_add_tail(&bpage->list, &cpu_buffer->pages);
509 } 566 }
510 rb_reset_cpu(cpu_buffer); 567 rb_reset_cpu(cpu_buffer);
511 568
@@ -532,7 +589,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
532{ 589{
533 struct ring_buffer_per_cpu *cpu_buffer; 590 struct ring_buffer_per_cpu *cpu_buffer;
534 unsigned nr_pages, rm_pages, new_pages; 591 unsigned nr_pages, rm_pages, new_pages;
535 struct buffer_page *page, *tmp; 592 struct buffer_page *bpage, *tmp;
536 unsigned long buffer_size; 593 unsigned long buffer_size;
537 unsigned long addr; 594 unsigned long addr;
538 LIST_HEAD(pages); 595 LIST_HEAD(pages);
@@ -562,7 +619,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
562 if (size < buffer_size) { 619 if (size < buffer_size) {
563 620
564 /* easy case, just free pages */ 621 /* easy case, just free pages */
565 BUG_ON(nr_pages >= buffer->pages); 622 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
623 mutex_unlock(&buffer->mutex);
624 return -1;
625 }
566 626
567 rm_pages = buffer->pages - nr_pages; 627 rm_pages = buffer->pages - nr_pages;
568 628
@@ -581,21 +641,26 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
581 * add these pages to the cpu_buffers. Otherwise we just free 641 * add these pages to the cpu_buffers. Otherwise we just free
582 * them all and return -ENOMEM; 642 * them all and return -ENOMEM;
583 */ 643 */
584 BUG_ON(nr_pages <= buffer->pages); 644 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
645 mutex_unlock(&buffer->mutex);
646 return -1;
647 }
648
585 new_pages = nr_pages - buffer->pages; 649 new_pages = nr_pages - buffer->pages;
586 650
587 for_each_buffer_cpu(buffer, cpu) { 651 for_each_buffer_cpu(buffer, cpu) {
588 for (i = 0; i < new_pages; i++) { 652 for (i = 0; i < new_pages; i++) {
589 page = kzalloc_node(ALIGN(sizeof(*page), 653 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
590 cache_line_size()), 654 cache_line_size()),
591 GFP_KERNEL, cpu_to_node(cpu)); 655 GFP_KERNEL, cpu_to_node(cpu));
592 if (!page) 656 if (!bpage)
593 goto free_pages; 657 goto free_pages;
594 list_add(&page->list, &pages); 658 list_add(&bpage->list, &pages);
595 addr = __get_free_page(GFP_KERNEL); 659 addr = __get_free_page(GFP_KERNEL);
596 if (!addr) 660 if (!addr)
597 goto free_pages; 661 goto free_pages;
598 page->page = (void *)addr; 662 bpage->page = (void *)addr;
663 rb_init_page(bpage->page);
599 } 664 }
600 } 665 }
601 666
@@ -604,7 +669,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
604 rb_insert_pages(cpu_buffer, &pages, new_pages); 669 rb_insert_pages(cpu_buffer, &pages, new_pages);
605 } 670 }
606 671
607 BUG_ON(!list_empty(&pages)); 672 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
673 mutex_unlock(&buffer->mutex);
674 return -1;
675 }
608 676
609 out: 677 out:
610 buffer->pages = nr_pages; 678 buffer->pages = nr_pages;
@@ -613,22 +681,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
613 return size; 681 return size;
614 682
615 free_pages: 683 free_pages:
616 list_for_each_entry_safe(page, tmp, &pages, list) { 684 list_for_each_entry_safe(bpage, tmp, &pages, list) {
617 list_del_init(&page->list); 685 list_del_init(&bpage->list);
618 free_buffer_page(page); 686 free_buffer_page(bpage);
619 } 687 }
620 mutex_unlock(&buffer->mutex); 688 mutex_unlock(&buffer->mutex);
621 return -ENOMEM; 689 return -ENOMEM;
622} 690}
691EXPORT_SYMBOL_GPL(ring_buffer_resize);
623 692
624static inline int rb_null_event(struct ring_buffer_event *event) 693static inline int rb_null_event(struct ring_buffer_event *event)
625{ 694{
626 return event->type == RINGBUF_TYPE_PADDING; 695 return event->type == RINGBUF_TYPE_PADDING;
627} 696}
628 697
629static inline void *__rb_page_index(struct buffer_page *page, unsigned index) 698static inline void *
699__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
700{
701 return bpage->data + index;
702}
703
704static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
630{ 705{
631 return page->page + index; 706 return bpage->page->data + index;
632} 707}
633 708
634static inline struct ring_buffer_event * 709static inline struct ring_buffer_event *
@@ -658,7 +733,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage)
658 733
659static inline unsigned rb_page_commit(struct buffer_page *bpage) 734static inline unsigned rb_page_commit(struct buffer_page *bpage)
660{ 735{
661 return local_read(&bpage->commit); 736 return local_read(&bpage->page->commit);
662} 737}
663 738
664/* Size is determined by what has been committed */ 739/* Size is determined by what has been committed */
@@ -693,7 +768,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
693 head += rb_event_length(event)) { 768 head += rb_event_length(event)) {
694 769
695 event = __rb_page_index(cpu_buffer->head_page, head); 770 event = __rb_page_index(cpu_buffer->head_page, head);
696 BUG_ON(rb_null_event(event)); 771 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
772 return;
697 /* Only count data entries */ 773 /* Only count data entries */
698 if (event->type != RINGBUF_TYPE_DATA) 774 if (event->type != RINGBUF_TYPE_DATA)
699 continue; 775 continue;
@@ -703,14 +779,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
703} 779}
704 780
705static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 781static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
706 struct buffer_page **page) 782 struct buffer_page **bpage)
707{ 783{
708 struct list_head *p = (*page)->list.next; 784 struct list_head *p = (*bpage)->list.next;
709 785
710 if (p == &cpu_buffer->pages) 786 if (p == &cpu_buffer->pages)
711 p = p->next; 787 p = p->next;
712 788
713 *page = list_entry(p, struct buffer_page, list); 789 *bpage = list_entry(p, struct buffer_page, list);
714} 790}
715 791
716static inline unsigned 792static inline unsigned
@@ -746,16 +822,18 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
746 addr &= PAGE_MASK; 822 addr &= PAGE_MASK;
747 823
748 while (cpu_buffer->commit_page->page != (void *)addr) { 824 while (cpu_buffer->commit_page->page != (void *)addr) {
749 RB_WARN_ON(cpu_buffer, 825 if (RB_WARN_ON(cpu_buffer,
750 cpu_buffer->commit_page == cpu_buffer->tail_page); 826 cpu_buffer->commit_page == cpu_buffer->tail_page))
751 cpu_buffer->commit_page->commit = 827 return;
828 cpu_buffer->commit_page->page->commit =
752 cpu_buffer->commit_page->write; 829 cpu_buffer->commit_page->write;
753 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 830 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
754 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 831 cpu_buffer->write_stamp =
832 cpu_buffer->commit_page->page->time_stamp;
755 } 833 }
756 834
757 /* Now set the commit to the event's index */ 835 /* Now set the commit to the event's index */
758 local_set(&cpu_buffer->commit_page->commit, index); 836 local_set(&cpu_buffer->commit_page->page->commit, index);
759} 837}
760 838
761static inline void 839static inline void
@@ -769,25 +847,38 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
769 * back to us). This allows us to do a simple loop to 847 * back to us). This allows us to do a simple loop to
770 * assign the commit to the tail. 848 * assign the commit to the tail.
771 */ 849 */
850 again:
772 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 851 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
773 cpu_buffer->commit_page->commit = 852 cpu_buffer->commit_page->page->commit =
774 cpu_buffer->commit_page->write; 853 cpu_buffer->commit_page->write;
775 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 854 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
776 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 855 cpu_buffer->write_stamp =
856 cpu_buffer->commit_page->page->time_stamp;
777 /* add barrier to keep gcc from optimizing too much */ 857 /* add barrier to keep gcc from optimizing too much */
778 barrier(); 858 barrier();
779 } 859 }
780 while (rb_commit_index(cpu_buffer) != 860 while (rb_commit_index(cpu_buffer) !=
781 rb_page_write(cpu_buffer->commit_page)) { 861 rb_page_write(cpu_buffer->commit_page)) {
782 cpu_buffer->commit_page->commit = 862 cpu_buffer->commit_page->page->commit =
783 cpu_buffer->commit_page->write; 863 cpu_buffer->commit_page->write;
784 barrier(); 864 barrier();
785 } 865 }
866
867 /* again, keep gcc from optimizing */
868 barrier();
869
870 /*
871 * If an interrupt came in just after the first while loop
872 * and pushed the tail page forward, we will be left with
873 * a dangling commit that will never go forward.
874 */
875 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
876 goto again;
786} 877}
787 878
788static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 879static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
789{ 880{
790 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; 881 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
791 cpu_buffer->reader_page->read = 0; 882 cpu_buffer->reader_page->read = 0;
792} 883}
793 884
@@ -806,7 +897,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
806 else 897 else
807 rb_inc_page(cpu_buffer, &iter->head_page); 898 rb_inc_page(cpu_buffer, &iter->head_page);
808 899
809 iter->read_stamp = iter->head_page->time_stamp; 900 iter->read_stamp = iter->head_page->page->time_stamp;
810 iter->head = 0; 901 iter->head = 0;
811} 902}
812 903
@@ -880,12 +971,15 @@ static struct ring_buffer_event *
880__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 971__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
881 unsigned type, unsigned long length, u64 *ts) 972 unsigned type, unsigned long length, u64 *ts)
882{ 973{
883 struct buffer_page *tail_page, *head_page, *reader_page; 974 struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
884 unsigned long tail, write; 975 unsigned long tail, write;
885 struct ring_buffer *buffer = cpu_buffer->buffer; 976 struct ring_buffer *buffer = cpu_buffer->buffer;
886 struct ring_buffer_event *event; 977 struct ring_buffer_event *event;
887 unsigned long flags; 978 unsigned long flags;
888 979
980 commit_page = cpu_buffer->commit_page;
981 /* we just need to protect against interrupts */
982 barrier();
889 tail_page = cpu_buffer->tail_page; 983 tail_page = cpu_buffer->tail_page;
890 write = local_add_return(length, &tail_page->write); 984 write = local_add_return(length, &tail_page->write);
891 tail = write - length; 985 tail = write - length;
@@ -894,7 +988,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
894 if (write > BUF_PAGE_SIZE) { 988 if (write > BUF_PAGE_SIZE) {
895 struct buffer_page *next_page = tail_page; 989 struct buffer_page *next_page = tail_page;
896 990
897 spin_lock_irqsave(&cpu_buffer->lock, flags); 991 local_irq_save(flags);
992 __raw_spin_lock(&cpu_buffer->lock);
898 993
899 rb_inc_page(cpu_buffer, &next_page); 994 rb_inc_page(cpu_buffer, &next_page);
900 995
@@ -902,14 +997,15 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
902 reader_page = cpu_buffer->reader_page; 997 reader_page = cpu_buffer->reader_page;
903 998
904 /* we grabbed the lock before incrementing */ 999 /* we grabbed the lock before incrementing */
905 RB_WARN_ON(cpu_buffer, next_page == reader_page); 1000 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1001 goto out_unlock;
906 1002
907 /* 1003 /*
908 * If for some reason, we had an interrupt storm that made 1004 * If for some reason, we had an interrupt storm that made
909 * it all the way around the buffer, bail, and warn 1005 * it all the way around the buffer, bail, and warn
910 * about it. 1006 * about it.
911 */ 1007 */
912 if (unlikely(next_page == cpu_buffer->commit_page)) { 1008 if (unlikely(next_page == commit_page)) {
913 WARN_ON_ONCE(1); 1009 WARN_ON_ONCE(1);
914 goto out_unlock; 1010 goto out_unlock;
915 } 1011 }
@@ -940,12 +1036,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
940 */ 1036 */
941 if (tail_page == cpu_buffer->tail_page) { 1037 if (tail_page == cpu_buffer->tail_page) {
942 local_set(&next_page->write, 0); 1038 local_set(&next_page->write, 0);
943 local_set(&next_page->commit, 0); 1039 local_set(&next_page->page->commit, 0);
944 cpu_buffer->tail_page = next_page; 1040 cpu_buffer->tail_page = next_page;
945 1041
946 /* reread the time stamp */ 1042 /* reread the time stamp */
947 *ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1043 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
948 cpu_buffer->tail_page->time_stamp = *ts; 1044 cpu_buffer->tail_page->page->time_stamp = *ts;
949 } 1045 }
950 1046
951 /* 1047 /*
@@ -970,7 +1066,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
970 rb_set_commit_to_write(cpu_buffer); 1066 rb_set_commit_to_write(cpu_buffer);
971 } 1067 }
972 1068
973 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1069 __raw_spin_unlock(&cpu_buffer->lock);
1070 local_irq_restore(flags);
974 1071
975 /* fail and let the caller try again */ 1072 /* fail and let the caller try again */
976 return ERR_PTR(-EAGAIN); 1073 return ERR_PTR(-EAGAIN);
@@ -978,7 +1075,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
978 1075
979 /* We reserved something on the buffer */ 1076 /* We reserved something on the buffer */
980 1077
981 BUG_ON(write > BUF_PAGE_SIZE); 1078 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1079 return NULL;
982 1080
983 event = __rb_page_index(tail_page, tail); 1081 event = __rb_page_index(tail_page, tail);
984 rb_update_event(event, type, length); 1082 rb_update_event(event, type, length);
@@ -988,12 +1086,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
988 * this page's time stamp. 1086 * this page's time stamp.
989 */ 1087 */
990 if (!tail && rb_is_commit(cpu_buffer, event)) 1088 if (!tail && rb_is_commit(cpu_buffer, event))
991 cpu_buffer->commit_page->time_stamp = *ts; 1089 cpu_buffer->commit_page->page->time_stamp = *ts;
992 1090
993 return event; 1091 return event;
994 1092
995 out_unlock: 1093 out_unlock:
996 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1094 __raw_spin_unlock(&cpu_buffer->lock);
1095 local_irq_restore(flags);
997 return NULL; 1096 return NULL;
998} 1097}
999 1098
@@ -1038,7 +1137,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1038 event->time_delta = *delta & TS_MASK; 1137 event->time_delta = *delta & TS_MASK;
1039 event->array[0] = *delta >> TS_SHIFT; 1138 event->array[0] = *delta >> TS_SHIFT;
1040 } else { 1139 } else {
1041 cpu_buffer->commit_page->time_stamp = *ts; 1140 cpu_buffer->commit_page->page->time_stamp = *ts;
1042 event->time_delta = 0; 1141 event->time_delta = 0;
1043 event->array[0] = 0; 1142 event->array[0] = 0;
1044 } 1143 }
@@ -1076,10 +1175,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1076 * storm or we have something buggy. 1175 * storm or we have something buggy.
1077 * Bail! 1176 * Bail!
1078 */ 1177 */
1079 if (unlikely(++nr_loops > 1000)) { 1178 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1080 RB_WARN_ON(cpu_buffer, 1);
1081 return NULL; 1179 return NULL;
1082 }
1083 1180
1084 ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1181 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1085 1182
@@ -1175,15 +1272,14 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1175 struct ring_buffer_event *event; 1272 struct ring_buffer_event *event;
1176 int cpu, resched; 1273 int cpu, resched;
1177 1274
1178 if (ring_buffers_off) 1275 if (ring_buffer_flags != RB_BUFFERS_ON)
1179 return NULL; 1276 return NULL;
1180 1277
1181 if (atomic_read(&buffer->record_disabled)) 1278 if (atomic_read(&buffer->record_disabled))
1182 return NULL; 1279 return NULL;
1183 1280
1184 /* If we are tracing schedule, we don't want to recurse */ 1281 /* If we are tracing schedule, we don't want to recurse */
1185 resched = need_resched(); 1282 resched = ftrace_preempt_disable();
1186 preempt_disable_notrace();
1187 1283
1188 cpu = raw_smp_processor_id(); 1284 cpu = raw_smp_processor_id();
1189 1285
@@ -1214,12 +1310,10 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1214 return event; 1310 return event;
1215 1311
1216 out: 1312 out:
1217 if (resched) 1313 ftrace_preempt_enable(resched);
1218 preempt_enable_notrace();
1219 else
1220 preempt_enable_notrace();
1221 return NULL; 1314 return NULL;
1222} 1315}
1316EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1223 1317
1224static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 1318static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1225 struct ring_buffer_event *event) 1319 struct ring_buffer_event *event)
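
The ftrace_preempt_disable()/ftrace_preempt_enable() pair replacing the open-coded sequences in this and the following hunks can be reconstructed from the lines being removed: sample need_resched() before disabling preemption, and on the way out pick the no-resched variant when a reschedule was already pending. A user-space reduction with stubbed preempt calls (the real helpers live elsewhere, presumably trace.h):

        #include <stdio.h>

        static int need_resched_flag;   /* stand-in for need_resched() */

        static void preempt_disable_notrace(void)           { puts("disable"); }
        static void preempt_enable_notrace(void)            { puts("enable"); }
        static void preempt_enable_no_resched_notrace(void) { puts("enable, no resched"); }

        static int ftrace_preempt_disable(void)
        {
                int resched = need_resched_flag;    /* sample first... */
                preempt_disable_notrace();          /* ...then disable */
                return resched;
        }

        static void ftrace_preempt_enable(int resched)
        {
                if (resched)    /* a reschedule was pending: don't trigger it here */
                        preempt_enable_no_resched_notrace();
                else
                        preempt_enable_notrace();
        }

        int main(void)
        {
                int resched = ftrace_preempt_disable();
                /* reserve/commit work would go here */
                ftrace_preempt_enable(resched);
                return 0;
        }
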
@@ -1259,16 +1353,14 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1259 /* 1353 /*
1260 * Only the last preempt count needs to restore preemption. 1354 * Only the last preempt count needs to restore preemption.
1261 */ 1355 */
1262 if (preempt_count() == 1) { 1356 if (preempt_count() == 1)
1263 if (per_cpu(rb_need_resched, cpu)) 1357 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1264 preempt_enable_no_resched_notrace(); 1358 else
1265 else
1266 preempt_enable_notrace();
1267 } else
1268 preempt_enable_no_resched_notrace(); 1359 preempt_enable_no_resched_notrace();
1269 1360
1270 return 0; 1361 return 0;
1271} 1362}
1363EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1272 1364
1273/** 1365/**
1274 * ring_buffer_write - write data to the buffer without reserving 1366 * ring_buffer_write - write data to the buffer without reserving
@@ -1294,14 +1386,13 @@ int ring_buffer_write(struct ring_buffer *buffer,
1294 int ret = -EBUSY; 1386 int ret = -EBUSY;
1295 int cpu, resched; 1387 int cpu, resched;
1296 1388
1297 if (ring_buffers_off) 1389 if (ring_buffer_flags != RB_BUFFERS_ON)
1298 return -EBUSY; 1390 return -EBUSY;
1299 1391
1300 if (atomic_read(&buffer->record_disabled)) 1392 if (atomic_read(&buffer->record_disabled))
1301 return -EBUSY; 1393 return -EBUSY;
1302 1394
1303 resched = need_resched(); 1395 resched = ftrace_preempt_disable();
1304 preempt_disable_notrace();
1305 1396
1306 cpu = raw_smp_processor_id(); 1397 cpu = raw_smp_processor_id();
1307 1398
@@ -1327,13 +1418,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
1327 1418
1328 ret = 0; 1419 ret = 0;
1329 out: 1420 out:
1330 if (resched) 1421 ftrace_preempt_enable(resched);
1331 preempt_enable_no_resched_notrace();
1332 else
1333 preempt_enable_notrace();
1334 1422
1335 return ret; 1423 return ret;
1336} 1424}
1425EXPORT_SYMBOL_GPL(ring_buffer_write);
1337 1426
1338static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 1427static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1339{ 1428{
@@ -1360,6 +1449,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
1360{ 1449{
1361 atomic_inc(&buffer->record_disabled); 1450 atomic_inc(&buffer->record_disabled);
1362} 1451}
1452EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1363 1453
1364/** 1454/**
1365 * ring_buffer_record_enable - enable writes to the buffer 1455 * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1462,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
1372{ 1462{
1373 atomic_dec(&buffer->record_disabled); 1463 atomic_dec(&buffer->record_disabled);
1374} 1464}
1465EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1375 1466
1376/** 1467/**
1377 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 1468 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1484,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1393 cpu_buffer = buffer->buffers[cpu]; 1484 cpu_buffer = buffer->buffers[cpu];
1394 atomic_inc(&cpu_buffer->record_disabled); 1485 atomic_inc(&cpu_buffer->record_disabled);
1395} 1486}
1487EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1396 1488
1397/** 1489/**
1398 * ring_buffer_record_enable_cpu - enable writes to the buffer 1490 * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1504,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1412 cpu_buffer = buffer->buffers[cpu]; 1504 cpu_buffer = buffer->buffers[cpu];
1413 atomic_dec(&cpu_buffer->record_disabled); 1505 atomic_dec(&cpu_buffer->record_disabled);
1414} 1506}
1507EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1415 1508
1416/** 1509/**
1417 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 1510 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1521,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1428 cpu_buffer = buffer->buffers[cpu]; 1521 cpu_buffer = buffer->buffers[cpu];
1429 return cpu_buffer->entries; 1522 return cpu_buffer->entries;
1430} 1523}
1524EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1431 1525
1432/** 1526/**
1433 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer 1527 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1538,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1444 cpu_buffer = buffer->buffers[cpu]; 1538 cpu_buffer = buffer->buffers[cpu];
1445 return cpu_buffer->overrun; 1539 return cpu_buffer->overrun;
1446} 1540}
1541EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1447 1542
1448/** 1543/**
1449 * ring_buffer_entries - get the number of entries in a buffer 1544 * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1561,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1466 1561
1467 return entries; 1562 return entries;
1468} 1563}
1564EXPORT_SYMBOL_GPL(ring_buffer_entries);
1469 1565
1470/** 1566/**
1471 * ring_buffer_overruns - get the number of overruns in the buffer
@@ -1488,15 +1584,9 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1488 1584
1489 return overruns; 1585 return overruns;
1490} 1586}
1587EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1491 1588
1492/** 1589static void rb_iter_reset(struct ring_buffer_iter *iter)
1493 * ring_buffer_iter_reset - reset an iterator
1494 * @iter: The iterator to reset
1495 *
1496 * Resets the iterator, so that it will start from the beginning
1497 * again.
1498 */
1499void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1500{ 1590{
1501 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1591 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1502 1592
@@ -1511,10 +1601,28 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1511 if (iter->head) 1601 if (iter->head)
1512 iter->read_stamp = cpu_buffer->read_stamp; 1602 iter->read_stamp = cpu_buffer->read_stamp;
1513 else 1603 else
1514 iter->read_stamp = iter->head_page->time_stamp; 1604 iter->read_stamp = iter->head_page->page->time_stamp;
1515} 1605}
1516 1606
1517/** 1607/**
1608 * ring_buffer_iter_reset - reset an iterator
1609 * @iter: The iterator to reset
1610 *
1611 * Resets the iterator, so that it will start from the beginning
1612 * again.
1613 */
1614void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1615{
1616 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1617 unsigned long flags;
1618
1619 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1620 rb_iter_reset(iter);
1621 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1622}
1623EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1624
1625/**
1518 * ring_buffer_iter_empty - check if an iterator has no more to read 1626 * ring_buffer_iter_empty - check if an iterator has no more to read
1519 * @iter: The iterator to check 1627 * @iter: The iterator to check
1520 */ 1628 */
@@ -1527,6 +1635,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1527 return iter->head_page == cpu_buffer->commit_page && 1635 return iter->head_page == cpu_buffer->commit_page &&
1528 iter->head == rb_commit_index(cpu_buffer); 1636 iter->head == rb_commit_index(cpu_buffer);
1529} 1637}
1638EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1530 1639
1531static void 1640static void
1532rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 1641rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1597,7 +1706,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1597 unsigned long flags; 1706 unsigned long flags;
1598 int nr_loops = 0; 1707 int nr_loops = 0;
1599 1708
1600 spin_lock_irqsave(&cpu_buffer->lock, flags); 1709 local_irq_save(flags);
1710 __raw_spin_lock(&cpu_buffer->lock);
1601 1711
1602 again: 1712 again:
1603 /* 1713 /*
@@ -1606,8 +1716,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1606 * a case where we will loop three times. There should be no 1716 * a case where we will loop three times. There should be no
1607 * reason to loop four times (that I know of). 1717 * reason to loop four times (that I know of).
1608 */ 1718 */
1609 if (unlikely(++nr_loops > 3)) { 1719 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1610 RB_WARN_ON(cpu_buffer, 1);
1611 reader = NULL; 1720 reader = NULL;
1612 goto out; 1721 goto out;
1613 } 1722 }
@@ -1619,8 +1728,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1619 goto out; 1728 goto out;
1620 1729
1621 /* Never should we have an index greater than the size */ 1730 /* Never should we have an index greater than the size */
1622 RB_WARN_ON(cpu_buffer, 1731 if (RB_WARN_ON(cpu_buffer,
1623 cpu_buffer->reader_page->read > rb_page_size(reader)); 1732 cpu_buffer->reader_page->read > rb_page_size(reader)))
1733 goto out;
1624 1734
1625 /* check if we caught up to the tail */ 1735 /* check if we caught up to the tail */
1626 reader = NULL; 1736 reader = NULL;
@@ -1637,7 +1747,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1637 cpu_buffer->reader_page->list.prev = reader->list.prev; 1747 cpu_buffer->reader_page->list.prev = reader->list.prev;
1638 1748
1639 local_set(&cpu_buffer->reader_page->write, 0); 1749 local_set(&cpu_buffer->reader_page->write, 0);
1640 local_set(&cpu_buffer->reader_page->commit, 0); 1750 local_set(&cpu_buffer->reader_page->page->commit, 0);
1641 1751
1642 /* Make the reader page now replace the head */ 1752 /* Make the reader page now replace the head */
1643 reader->list.prev->next = &cpu_buffer->reader_page->list; 1753 reader->list.prev->next = &cpu_buffer->reader_page->list;
@@ -1659,7 +1769,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1659 goto again; 1769 goto again;
1660 1770
1661 out: 1771 out:
1662 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1772 __raw_spin_unlock(&cpu_buffer->lock);
1773 local_irq_restore(flags);
1663 1774
1664 return reader; 1775 return reader;
1665} 1776}
@@ -1673,7 +1784,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1673 reader = rb_get_reader_page(cpu_buffer); 1784 reader = rb_get_reader_page(cpu_buffer);
1674 1785
1675 /* This function should not be called when buffer is empty */ 1786 /* This function should not be called when buffer is empty */
1676 BUG_ON(!reader); 1787 if (RB_WARN_ON(cpu_buffer, !reader))
1788 return;
1677 1789
1678 event = rb_reader_event(cpu_buffer); 1790 event = rb_reader_event(cpu_buffer);
1679 1791
@@ -1700,7 +1812,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1700 * Check if we are at the end of the buffer. 1812 * Check if we are at the end of the buffer.
1701 */ 1813 */
1702 if (iter->head >= rb_page_size(iter->head_page)) { 1814 if (iter->head >= rb_page_size(iter->head_page)) {
1703 BUG_ON(iter->head_page == cpu_buffer->commit_page); 1815 if (RB_WARN_ON(buffer,
1816 iter->head_page == cpu_buffer->commit_page))
1817 return;
1704 rb_inc_iter(iter); 1818 rb_inc_iter(iter);
1705 return; 1819 return;
1706 } 1820 }
@@ -1713,8 +1827,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1713 * This should not be called to advance the header if we are 1827 * This should not be called to advance the header if we are
1714 * at the tail of the buffer. 1828 * at the tail of the buffer.
1715 */ 1829 */
1716 BUG_ON((iter->head_page == cpu_buffer->commit_page) && 1830 if (RB_WARN_ON(cpu_buffer,
1717 (iter->head + length > rb_commit_index(cpu_buffer))); 1831 (iter->head_page == cpu_buffer->commit_page) &&
1832 (iter->head + length > rb_commit_index(cpu_buffer))))
1833 return;
1718 1834
1719 rb_update_iter_read_stamp(iter, event); 1835 rb_update_iter_read_stamp(iter, event);
1720 1836
@@ -1726,17 +1842,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1726 rb_advance_iter(iter); 1842 rb_advance_iter(iter);
1727} 1843}
1728 1844
1729/** 1845static struct ring_buffer_event *
1730 * ring_buffer_peek - peek at the next event to be read 1846rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1731 * @buffer: The ring buffer to read
1732 * @cpu: The cpu to peek at
1733 * @ts: The timestamp counter of this event.
1734 *
1735 * This will return the event that will be read next, but does
1736 * not consume the data.
1737 */
1738struct ring_buffer_event *
1739ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1740{ 1847{
1741 struct ring_buffer_per_cpu *cpu_buffer; 1848 struct ring_buffer_per_cpu *cpu_buffer;
1742 struct ring_buffer_event *event; 1849 struct ring_buffer_event *event;
@@ -1757,10 +1864,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1757 * can have. Nesting 10 deep of interrupts is clearly 1864 * can have. Nesting 10 deep of interrupts is clearly
1758 * an anomaly. 1865 * an anomaly.
1759 */ 1866 */
1760 if (unlikely(++nr_loops > 10)) { 1867 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1761 RB_WARN_ON(cpu_buffer, 1);
1762 return NULL; 1868 return NULL;
1763 }
1764 1869
1765 reader = rb_get_reader_page(cpu_buffer); 1870 reader = rb_get_reader_page(cpu_buffer);
1766 if (!reader) 1871 if (!reader)
@@ -1797,17 +1902,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1797 1902
1798 return NULL; 1903 return NULL;
1799} 1904}
1905EXPORT_SYMBOL_GPL(ring_buffer_peek);
1800 1906
1801/** 1907static struct ring_buffer_event *
1802 * ring_buffer_iter_peek - peek at the next event to be read 1908rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1803 * @iter: The ring buffer iterator
1804 * @ts: The timestamp counter of this event.
1805 *
1806 * This will return the event that will be read next, but does
1807 * not increment the iterator.
1808 */
1809struct ring_buffer_event *
1810ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1811{ 1909{
1812 struct ring_buffer *buffer; 1910 struct ring_buffer *buffer;
1813 struct ring_buffer_per_cpu *cpu_buffer; 1911 struct ring_buffer_per_cpu *cpu_buffer;
@@ -1829,10 +1927,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1829 * can have. Nesting 10 deep of interrupts is clearly 1927 * can have. Nesting 10 deep of interrupts is clearly
1830 * an anomaly. 1928 * an anomaly.
1831 */ 1929 */
1832 if (unlikely(++nr_loops > 10)) { 1930 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1833 RB_WARN_ON(cpu_buffer, 1);
1834 return NULL; 1931 return NULL;
1835 }
1836 1932
1837 if (rb_per_cpu_empty(cpu_buffer)) 1933 if (rb_per_cpu_empty(cpu_buffer))
1838 return NULL; 1934 return NULL;
@@ -1867,6 +1963,52 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1867 1963
1868 return NULL; 1964 return NULL;
1869} 1965}
1966EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
1967
1968/**
1969 * ring_buffer_peek - peek at the next event to be read
1970 * @buffer: The ring buffer to read
1971 * @cpu: The cpu to peek at
1972 * @ts: The timestamp counter of this event.
1973 *
1974 * This will return the event that will be read next, but does
1975 * not consume the data.
1976 */
1977struct ring_buffer_event *
1978ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1979{
1980 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1981 struct ring_buffer_event *event;
1982 unsigned long flags;
1983
1984 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1985 event = rb_buffer_peek(buffer, cpu, ts);
1986 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1987
1988 return event;
1989}
1990
1991/**
1992 * ring_buffer_iter_peek - peek at the next event to be read
1993 * @iter: The ring buffer iterator
1994 * @ts: The timestamp counter of this event.
1995 *
1996 * This will return the event that will be read next, but does
1997 * not increment the iterator.
1998 */
1999struct ring_buffer_event *
2000ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2001{
2002 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2003 struct ring_buffer_event *event;
2004 unsigned long flags;
2005
2006 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2007 event = rb_iter_peek(iter, ts);
2008 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2009
2010 return event;
2011}
1870 2012
1871/** 2013/**
1872 * ring_buffer_consume - return an event and consume it 2014 * ring_buffer_consume - return an event and consume it
@@ -1879,21 +2021,27 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1879struct ring_buffer_event * 2021struct ring_buffer_event *
1880ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) 2022ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1881{ 2023{
1882 struct ring_buffer_per_cpu *cpu_buffer; 2024 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1883 struct ring_buffer_event *event; 2025 struct ring_buffer_event *event;
2026 unsigned long flags;
1884 2027
1885 if (!cpu_isset(cpu, buffer->cpumask)) 2028 if (!cpu_isset(cpu, buffer->cpumask))
1886 return NULL; 2029 return NULL;
1887 2030
1888 event = ring_buffer_peek(buffer, cpu, ts); 2031 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2032
2033 event = rb_buffer_peek(buffer, cpu, ts);
1889 if (!event) 2034 if (!event)
1890 return NULL; 2035 goto out;
1891 2036
1892 cpu_buffer = buffer->buffers[cpu];
1893 rb_advance_reader(cpu_buffer); 2037 rb_advance_reader(cpu_buffer);
1894 2038
2039 out:
2040 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2041
1895 return event; 2042 return event;
1896} 2043}
2044EXPORT_SYMBOL_GPL(ring_buffer_consume);
1897 2045
1898/** 2046/**
1899 * ring_buffer_read_start - start a non consuming read of the buffer 2047 * ring_buffer_read_start - start a non consuming read of the buffer
@@ -1928,12 +2076,15 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1928 atomic_inc(&cpu_buffer->record_disabled); 2076 atomic_inc(&cpu_buffer->record_disabled);
1929 synchronize_sched(); 2077 synchronize_sched();
1930 2078
1931 spin_lock_irqsave(&cpu_buffer->lock, flags); 2079 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1932 ring_buffer_iter_reset(iter); 2080 __raw_spin_lock(&cpu_buffer->lock);
1933 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2081 rb_iter_reset(iter);
2082 __raw_spin_unlock(&cpu_buffer->lock);
2083 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1934 2084
1935 return iter; 2085 return iter;
1936} 2086}
2087EXPORT_SYMBOL_GPL(ring_buffer_read_start);
1937 2088
1938/** 2089/**
1939 * ring_buffer_finish - finish reading the iterator of the buffer 2090 * ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +2101,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
1950 atomic_dec(&cpu_buffer->record_disabled); 2101 atomic_dec(&cpu_buffer->record_disabled);
1951 kfree(iter); 2102 kfree(iter);
1952} 2103}
2104EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
1953 2105
1954/** 2106/**
1955 * ring_buffer_read - read the next item in the ring buffer by the iterator 2107 * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1962,15 +2114,21 @@ struct ring_buffer_event *
1962ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 2114ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1963{ 2115{
1964 struct ring_buffer_event *event; 2116 struct ring_buffer_event *event;
2117 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2118 unsigned long flags;
1965 2119
1966 event = ring_buffer_iter_peek(iter, ts); 2120 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2121 event = rb_iter_peek(iter, ts);
1967 if (!event) 2122 if (!event)
1968 return NULL; 2123 goto out;
1969 2124
1970 rb_advance_iter(iter); 2125 rb_advance_iter(iter);
2126 out:
2127 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1971 2128
1972 return event; 2129 return event;
1973} 2130}
2131EXPORT_SYMBOL_GPL(ring_buffer_read);
1974 2132
1975/** 2133/**
1976 * ring_buffer_size - return the size of the ring buffer (in bytes) 2134 * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2138,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
1980{ 2138{
1981 return BUF_PAGE_SIZE * buffer->pages; 2139 return BUF_PAGE_SIZE * buffer->pages;
1982} 2140}
2141EXPORT_SYMBOL_GPL(ring_buffer_size);
1983 2142
1984static void 2143static void
1985rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 2144rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -1987,7 +2146,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1987 cpu_buffer->head_page 2146 cpu_buffer->head_page
1988 = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 2147 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1989 local_set(&cpu_buffer->head_page->write, 0); 2148 local_set(&cpu_buffer->head_page->write, 0);
1990 local_set(&cpu_buffer->head_page->commit, 0); 2149 local_set(&cpu_buffer->head_page->page->commit, 0);
1991 2150
1992 cpu_buffer->head_page->read = 0; 2151 cpu_buffer->head_page->read = 0;
1993 2152
@@ -1996,7 +2155,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1996 2155
1997 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 2156 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1998 local_set(&cpu_buffer->reader_page->write, 0); 2157 local_set(&cpu_buffer->reader_page->write, 0);
1999 local_set(&cpu_buffer->reader_page->commit, 0); 2158 local_set(&cpu_buffer->reader_page->page->commit, 0);
2000 cpu_buffer->reader_page->read = 0; 2159 cpu_buffer->reader_page->read = 0;
2001 2160
2002 cpu_buffer->overrun = 0; 2161 cpu_buffer->overrun = 0;
@@ -2016,12 +2175,17 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2016 if (!cpu_isset(cpu, buffer->cpumask)) 2175 if (!cpu_isset(cpu, buffer->cpumask))
2017 return; 2176 return;
2018 2177
2019 spin_lock_irqsave(&cpu_buffer->lock, flags); 2178 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2179
2180 __raw_spin_lock(&cpu_buffer->lock);
2020 2181
2021 rb_reset_cpu(cpu_buffer); 2182 rb_reset_cpu(cpu_buffer);
2022 2183
2023 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2184 __raw_spin_unlock(&cpu_buffer->lock);
2185
2186 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2024} 2187}
2188EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2025 2189
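
The nesting in ring_buffer_reset_cpu() above is the locking model this patch introduces: the irq-safe reader_lock is always the outer lock, and the raw cpu_buffer->lock, which the write path in __rb_reserve_next() also takes under local_irq_save(), is strictly inner and released first. A pthread sketch of the ordering (mutexes stand in for the kernel spinlocks; illustrative only):

        #include <pthread.h>

        struct cpu_buffer_model {
                pthread_mutex_t reader_lock;    /* stand-in for spinlock_t */
                pthread_mutex_t lock;           /* stand-in for raw_spinlock_t */
        };

        static struct cpu_buffer_model cb = {
                PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER,
        };

        static void reset_cpu_model(void)
        {
                pthread_mutex_lock(&cb.reader_lock);    /* hold off readers... */
                pthread_mutex_lock(&cb.lock);           /* ...then stop page moves */

                /* rb_reset_cpu() work would go here */

                pthread_mutex_unlock(&cb.lock);         /* inner lock first */
                pthread_mutex_unlock(&cb.reader_lock);
        }

        int main(void)
        {
                reset_cpu_model();
                return 0;
        }
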
2026/** 2190/**
2027 * ring_buffer_reset - reset a ring buffer 2191 * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2198,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
2034 for_each_buffer_cpu(buffer, cpu) 2198 for_each_buffer_cpu(buffer, cpu)
2035 ring_buffer_reset_cpu(buffer, cpu); 2199 ring_buffer_reset_cpu(buffer, cpu);
2036} 2200}
2201EXPORT_SYMBOL_GPL(ring_buffer_reset);
2037 2202
2038/** 2203/**
2039 * ring_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2217,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
2052 } 2217 }
2053 return 1; 2218 return 1;
2054} 2219}
2220EXPORT_SYMBOL_GPL(ring_buffer_empty);
2055 2221
2056/** 2222/**
2057 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 2223 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2234,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2068 cpu_buffer = buffer->buffers[cpu]; 2234 cpu_buffer = buffer->buffers[cpu];
2069 return rb_per_cpu_empty(cpu_buffer); 2235 return rb_per_cpu_empty(cpu_buffer);
2070} 2236}
2237EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2071 2238
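A caller that only wants to know whether one CPU has data could poll this predicate; a crude busy-wait sketch (real users should sleep or arrange their own wakeup, as the ring_buffer_read_page comment below points out):

    /* spin until cpu's buffer has at least one event */
    while (ring_buffer_empty_cpu(buffer, cpu))
        cpu_relax();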
2072/** 2239/**
2073 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 2240 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2090,8 +2257,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2090 return -EINVAL; 2257 return -EINVAL;
2091 2258
2092 /* At least make sure the two buffers are somewhat the same */ 2259 /* At least make sure the two buffers are somewhat the same */
2093 if (buffer_a->size != buffer_b->size || 2260 if (buffer_a->pages != buffer_b->pages)
2094 buffer_a->pages != buffer_b->pages)
2095 return -EINVAL; 2261 return -EINVAL;
2096 2262
2097 cpu_buffer_a = buffer_a->buffers[cpu]; 2263 cpu_buffer_a = buffer_a->buffers[cpu];
@@ -2117,17 +2283,180 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2117 2283
2118 return 0; 2284 return 0;
2119} 2285}
2286EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
2287
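The swap above is the building block for latency-style snapshots: with a spare buffer of the same page count, one CPU's pages change hands in O(1) instead of being copied out. A minimal sketch, where snap is an assumed pre-allocated spare buffer and process_snapshot() is hypothetical:

    /* exchange cpu's pages between the live buffer and the spare */
    if (ring_buffer_swap_cpu(snap, buffer, cpu) == 0) {
        /* snap now holds cpu's events; buffer keeps recording
         * into the pages snap used to own */
        process_snapshot(snap, cpu);
    }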
2288static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2289 struct buffer_data_page *bpage)
2290{
2291 struct ring_buffer_event *event;
2292 unsigned long head;
2293
2294 __raw_spin_lock(&cpu_buffer->lock);
2295 for (head = 0; head < local_read(&bpage->commit);
2296 head += rb_event_length(event)) {
2297
2298 event = __rb_data_page_index(bpage, head);
2299 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2300			break;	/* must not return with cpu_buffer->lock held */
2301 /* Only count data entries */
2302 if (event->type != RINGBUF_TYPE_DATA)
2303 continue;
2304 cpu_buffer->entries--;
2305 }
2306 __raw_spin_unlock(&cpu_buffer->lock);
2307}
2308
2309/**
2310 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2311 * @buffer: the buffer to allocate for.
2312 *
2313 * This function is used in conjunction with ring_buffer_read_page.
2314 * When reading a full page from the ring buffer, these functions
2315 * can be used to speed up the process. The calling function should
2316 * allocate a few pages first with this function. Then when it
2317 * needs to get pages from the ring buffer, it passes the result
2318 * of this function into ring_buffer_read_page, which will swap
2319 * the page that was allocated with the read page of the buffer.
2320 *
2321 * Returns:
2322 * The page allocated, or NULL on error.
2323 */
2324void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2325{
2326 unsigned long addr;
2327 struct buffer_data_page *bpage;
2328
2329 addr = __get_free_page(GFP_KERNEL);
2330 if (!addr)
2331 return NULL;
2332
2333 bpage = (void *)addr;
2334
2335 return bpage;
2336}
2337
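Because the allocation uses GFP_KERNEL and may sleep, callers are expected to build their read-page pool up front, as the comment above suggests. A minimal sketch; NR_READ_PAGES and the unwind label are local assumptions, not part of this interface:

    #define NR_READ_PAGES 4
    void *pool[NR_READ_PAGES];
    int i;

    for (i = 0; i < NR_READ_PAGES; i++) {
        pool[i] = ring_buffer_alloc_read_page(buffer);
        if (!pool[i])
            goto free_partial;	/* free pool[0..i-1] and bail */
    }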
2338/**
2339 * ring_buffer_free_read_page - free an allocated read page
2340 * @buffer: the buffer the page was allocated for
2341 * @data: the page to free
2342 *
2343 * Free a page allocated from ring_buffer_alloc_read_page.
2344 */
2345void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2346{
2347 free_page((unsigned long)data);
2348}
2349
2350/**
2351 * ring_buffer_read_page - extract a page from the ring buffer
2352 * @buffer: buffer to extract from
2353 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2354 * @cpu: the cpu of the buffer to extract
2355 * @full: should the extraction only happen when the page is full.
2356 *
2357 * This function will pull out a page from the ring buffer and consume it.
2358 * @data_page must be the address of the variable that was returned
2359 * from ring_buffer_alloc_read_page. This is because the page might be used
2360 * to swap with a page in the ring buffer.
2361 *
2362 * for example:
2363 * rpage = ring_buffer_alloc_read_page(buffer);
2364 * if (!rpage)
2365 * return error;
2366 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2367 * if (ret)
2368 * process_page(rpage);
2369 *
2370 * When @full is set, the function will not return 1 unless
2371 * the writer is off the reader page.
2372 *
2373 * Note: it is up to the calling functions to handle sleeps and wakeups.
2374 * The ring buffer can be used anywhere in the kernel and cannot
2375 * blindly call wake_up. The layer that uses the ring buffer must be
2376 * responsible for that.
2377 *
2378 * Returns:
2379 * 1 if data has been transferred
2380 * 0 if no data has been transferred.
2381 */
2382int ring_buffer_read_page(struct ring_buffer *buffer,
2383 void **data_page, int cpu, int full)
2384{
2385 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2386 struct ring_buffer_event *event;
2387 struct buffer_data_page *bpage;
2388 unsigned long flags;
2389 int ret = 0;
2390
2391 if (!data_page)
2392 return 0;
2393
2394 bpage = *data_page;
2395 if (!bpage)
2396 return 0;
2397
2398 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2399
2400 /*
2401	 * rb_buffer_peek will advance to the next reader page if
2402	 * the current reader page is empty.
2403 */
2404 event = rb_buffer_peek(buffer, cpu, NULL);
2405 if (!event)
2406 goto out;
2407
2408 /* check for data */
2409 if (!local_read(&cpu_buffer->reader_page->page->commit))
2410 goto out;
2411 /*
2412 * If the writer is already off of the read page, then simply
2413 * switch the read page with the given page. Otherwise
2414 * we need to copy the data from the reader to the writer.
2415 */
2416	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2417		unsigned int read = cpu_buffer->reader_page->read;
2418		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2419		if (full)
2420			goto out;
2421		/* Writer is still on the reader page: copy into the
2422		 * caller's page (bpage) instead of clobbering it */
2423		memcpy(bpage->data, rpage->data + read,
2424		       local_read(&rpage->commit) - read);
2425		local_set(&bpage->commit,
2426			  local_read(&rpage->commit) - read);
2427		/* consume what was read */
2428		cpu_buffer->reader_page->read = local_read(&rpage->commit);
2429
2430 } else {
2431 /* swap the pages */
2432 rb_init_page(bpage);
2433 bpage = cpu_buffer->reader_page->page;
2434 cpu_buffer->reader_page->page = *data_page;
2435 cpu_buffer->reader_page->read = 0;
2436 *data_page = bpage;
2437 }
2438 ret = 1;
2439
2440 /* update the entry counter */
2441 rb_remove_entries(cpu_buffer, bpage);
2442 out:
2443 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2444
2445 return ret;
2446}
2120 2447
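Putting the three calls together, the intended consumer pattern is roughly the following, instantiating the example from the kernel-doc above; drain_cpu() and process_page() are hypothetical names for the caller's side:

    static void drain_cpu(struct ring_buffer *buffer, int cpu)
    {
        void *rpage = ring_buffer_alloc_read_page(buffer);

        if (!rpage)
            return;
        /* full == 0: accept a partially filled reader page too */
        while (ring_buffer_read_page(buffer, &rpage, cpu, 0))
            process_page(rpage);
        ring_buffer_free_read_page(buffer, rpage);
    }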
2121static ssize_t 2448static ssize_t
2122rb_simple_read(struct file *filp, char __user *ubuf, 2449rb_simple_read(struct file *filp, char __user *ubuf,
2123 size_t cnt, loff_t *ppos) 2450 size_t cnt, loff_t *ppos)
2124{ 2451{
2125 int *p = filp->private_data; 2452 long *p = filp->private_data;
2126 char buf[64]; 2453 char buf[64];
2127 int r; 2454 int r;
2128 2455
2129 /* !ring_buffers_off == tracing_on */ 2456 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2130 r = sprintf(buf, "%d\n", !*p); 2457 r = sprintf(buf, "permanently disabled\n");
2458 else
2459 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2131 2460
2132 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2461 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2133} 2462}
@@ -2136,7 +2465,7 @@ static ssize_t
2136rb_simple_write(struct file *filp, const char __user *ubuf, 2465rb_simple_write(struct file *filp, const char __user *ubuf,
2137 size_t cnt, loff_t *ppos) 2466 size_t cnt, loff_t *ppos)
2138{ 2467{
2139 int *p = filp->private_data; 2468 long *p = filp->private_data;
2140 char buf[64]; 2469 char buf[64];
2141 long val; 2470 long val;
2142 int ret; 2471 int ret;
@@ -2153,8 +2482,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2153 if (ret < 0) 2482 if (ret < 0)
2154 return ret; 2483 return ret;
2155 2484
2156 /* !ring_buffers_off == tracing_on */ 2485 if (val)
2157 *p = !val; 2486 set_bit(RB_BUFFERS_ON_BIT, p);
2487 else
2488 clear_bit(RB_BUFFERS_ON_BIT, p);
2158 2489
2159 (*ppos)++; 2490 (*ppos)++;
2160 2491
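From user space the file behaves as a boolean switch over RB_BUFFERS_ON_BIT, except that once the DISABLED bit is set a read reports "permanently disabled". A sketch of flipping the switch, assuming debugfs is mounted at /sys/kernel/debug (the mount point is an assumption, not something this patch establishes):

    #include <fcntl.h>
    #include <unistd.h>

    static int set_tracing_on(int on)
    {
        int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

        if (fd < 0)
            return -1;
        if (write(fd, on ? "1" : "0", 1) != 1) {
            close(fd);
            return -1;
        }
        return close(fd);
    }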
@@ -2176,7 +2507,7 @@ static __init int rb_init_debugfs(void)
2176 d_tracer = tracing_init_dentry(); 2507 d_tracer = tracing_init_dentry();
2177 2508
2178 entry = debugfs_create_file("tracing_on", 0644, d_tracer, 2509 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2179 &ring_buffers_off, &rb_simple_fops); 2510 &ring_buffer_flags, &rb_simple_fops);
2180 if (!entry) 2511 if (!entry)
2181 pr_warning("Could not create debugfs 'tracing_on' entry\n"); 2512 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2182 2513