commit 044fa782ebb9472cf5253e95d9a625fd4c0bdd99
Author:     Steven Rostedt <srostedt@redhat.com>    2008-12-02 23:50:03 -0500
Committer:  Ingo Molnar <mingo@elte.hu>             2008-12-03 02:56:24 -0500
tree        19282ded5d8a1b48690de199cec34d73730cd130
parent      14a866c567e040ccf6240d68b083dd1dbbde63e6
ring-buffer: change "page" variable names to "bpage"

Impact: clean up

Andrew Morton pointed out that, by kernel convention, a variable named
"page" should be a struct page pointer. The ring buffer uses variables
named "page" for pointers to other things, so this patch renames those
variables to "bpage" (as in "buffer page").

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
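For illustration, the smallest instance of the rename appears in the first hunk below; shown here as plain before/after code, the behaviour is identical and only the identifier changes:

/* Before: "page" reads as if it were a struct page *, which it is not. */
static void rb_init_page(struct buffer_data_page *page)
{
	local_set(&page->commit, 0);
}

/* After: "bpage" makes clear this is a ring-buffer data page descriptor. */
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}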
Diffstat (limited to 'kernel/trace/ring_buffer.c'):
 kernel/trace/ring_buffer.c | 130
 1 file changed, 65 insertions(+), 65 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 50b74d3a5c32..7f69cfeaadf7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -208,9 +208,9 @@ struct buffer_page {
 	struct buffer_data_page *page;	/* Actual data page */
 };
 
-static void rb_init_page(struct buffer_data_page *page)
+static void rb_init_page(struct buffer_data_page *bpage)
 {
-	local_set(&page->commit, 0);
+	local_set(&bpage->commit, 0);
 }
 
 /*
@@ -298,19 +298,19 @@ struct ring_buffer_iter {
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *head = &cpu_buffer->pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 
 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 		return -1;
 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
 		return -1;
 
-	list_for_each_entry_safe(page, tmp, head, list) {
+	list_for_each_entry_safe(bpage, tmp, head, list) {
 		if (RB_WARN_ON(cpu_buffer,
-			       page->list.next->prev != &page->list))
+			       bpage->list.next->prev != &bpage->list))
 			return -1;
 		if (RB_WARN_ON(cpu_buffer,
-			       page->list.prev->next != &page->list))
+			       bpage->list.prev->next != &bpage->list))
 			return -1;
 	}
 
@@ -321,23 +321,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 			     unsigned nr_pages)
 {
 	struct list_head *head = &cpu_buffer->pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 	unsigned long addr;
 	LIST_HEAD(pages);
 	unsigned i;
 
 	for (i = 0; i < nr_pages; i++) {
-		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
-		if (!page)
+		if (!bpage)
 			goto free_pages;
-		list_add(&page->list, &pages);
+		list_add(&bpage->list, &pages);
 
 		addr = __get_free_page(GFP_KERNEL);
 		if (!addr)
 			goto free_pages;
-		page->page = (void *)addr;
-		rb_init_page(page->page);
+		bpage->page = (void *)addr;
+		rb_init_page(bpage->page);
 	}
 
 	list_splice(&pages, head);
@@ -347,9 +347,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	return 0;
 
  free_pages:
-	list_for_each_entry_safe(page, tmp, &pages, list) {
-		list_del_init(&page->list);
-		free_buffer_page(page);
+	list_for_each_entry_safe(bpage, tmp, &pages, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	return -ENOMEM;
 }
@@ -358,7 +358,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	struct buffer_page *page;
+	struct buffer_page *bpage;
 	unsigned long addr;
 	int ret;
 
@@ -373,17 +373,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
-	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
-	if (!page)
+	if (!bpage)
 		goto fail_free_buffer;
 
-	cpu_buffer->reader_page = page;
+	cpu_buffer->reader_page = bpage;
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
 		goto fail_free_reader;
-	page->page = (void *)addr;
-	rb_init_page(page->page);
+	bpage->page = (void *)addr;
+	rb_init_page(bpage->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 
@@ -408,14 +408,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *head = &cpu_buffer->pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 
 	list_del_init(&cpu_buffer->reader_page->list);
 	free_buffer_page(cpu_buffer->reader_page);
 
-	list_for_each_entry_safe(page, tmp, head, list) {
-		list_del_init(&page->list);
-		free_buffer_page(page);
+	list_for_each_entry_safe(bpage, tmp, head, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	kfree(cpu_buffer);
 }
@@ -512,7 +512,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 static void
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 {
-	struct buffer_page *page;
+	struct buffer_page *bpage;
 	struct list_head *p;
 	unsigned i;
 
@@ -523,9 +523,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
 			return;
 		p = cpu_buffer->pages.next;
-		page = list_entry(p, struct buffer_page, list);
-		list_del_init(&page->list);
-		free_buffer_page(page);
+		bpage = list_entry(p, struct buffer_page, list);
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
 		return;
@@ -542,7 +542,7 @@ static void
 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		struct list_head *pages, unsigned nr_pages)
 {
-	struct buffer_page *page;
+	struct buffer_page *bpage;
 	struct list_head *p;
 	unsigned i;
 
@@ -553,9 +553,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
 			return;
 		p = pages->next;
-		page = list_entry(p, struct buffer_page, list);
-		list_del_init(&page->list);
-		list_add_tail(&page->list, &cpu_buffer->pages);
+		bpage = list_entry(p, struct buffer_page, list);
+		list_del_init(&bpage->list);
+		list_add_tail(&bpage->list, &cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
 
@@ -582,7 +582,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned nr_pages, rm_pages, new_pages;
-	struct buffer_page *page, *tmp;
+	struct buffer_page *bpage, *tmp;
 	unsigned long buffer_size;
 	unsigned long addr;
 	LIST_HEAD(pages);
@@ -643,17 +643,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
-			page = kzalloc_node(ALIGN(sizeof(*page),
+			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
 						  cache_line_size()),
 					    GFP_KERNEL, cpu_to_node(cpu));
-			if (!page)
+			if (!bpage)
 				goto free_pages;
-			list_add(&page->list, &pages);
+			list_add(&bpage->list, &pages);
 			addr = __get_free_page(GFP_KERNEL);
 			if (!addr)
 				goto free_pages;
-			page->page = (void *)addr;
-			rb_init_page(page->page);
+			bpage->page = (void *)addr;
+			rb_init_page(bpage->page);
 		}
 	}
 
@@ -674,9 +674,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	return size;
 
  free_pages:
-	list_for_each_entry_safe(page, tmp, &pages, list) {
-		list_del_init(&page->list);
-		free_buffer_page(page);
+	list_for_each_entry_safe(bpage, tmp, &pages, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
 	}
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
@@ -688,14 +688,14 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 }
 
 static inline void *
-__rb_data_page_index(struct buffer_data_page *page, unsigned index)
+__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
-	return page->data + index;
+	return bpage->data + index;
 }
 
-static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
+static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
-	return page->page->data + index;
+	return bpage->page->data + index;
 }
 
 static inline struct ring_buffer_event *
@@ -771,14 +771,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
-			       struct buffer_page **page)
+			       struct buffer_page **bpage)
 {
-	struct list_head *p = (*page)->list.next;
+	struct list_head *p = (*bpage)->list.next;
 
 	if (p == &cpu_buffer->pages)
 		p = p->next;
 
-	*page = list_entry(p, struct buffer_page, list);
+	*bpage = list_entry(p, struct buffer_page, list);
 }
 
 static inline unsigned
@@ -2239,16 +2239,16 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 }
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-			      struct buffer_data_page *page)
+			      struct buffer_data_page *bpage)
 {
 	struct ring_buffer_event *event;
 	unsigned long head;
 
 	__raw_spin_lock(&cpu_buffer->lock);
-	for (head = 0; head < local_read(&page->commit);
+	for (head = 0; head < local_read(&bpage->commit);
 	     head += rb_event_length(event)) {
 
-		event = __rb_data_page_index(page, head);
+		event = __rb_data_page_index(bpage, head);
 		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
 			return;
 		/* Only count data entries */
@@ -2277,15 +2277,15 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 {
 	unsigned long addr;
-	struct buffer_data_page *page;
+	struct buffer_data_page *bpage;
 
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
 		return NULL;
 
-	page = (void *)addr;
+	bpage = (void *)addr;
 
-	return page;
+	return bpage;
 }
 
 /**
@@ -2337,15 +2337,15 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
-	struct buffer_data_page *page;
+	struct buffer_data_page *bpage;
 	unsigned long flags;
 	int ret = 0;
 
 	if (!data_page)
 		return 0;
 
-	page = *data_page;
-	if (!page)
+	bpage = *data_page;
+	if (!bpage)
 		return 0;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2372,26 +2372,26 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		if (full)
 			goto out;
 		/* The writer is still on the reader page, we must copy */
-		page = cpu_buffer->reader_page->page;
-		memcpy(page->data,
+		bpage = cpu_buffer->reader_page->page;
+		memcpy(bpage->data,
 		       cpu_buffer->reader_page->page->data + read,
-		       local_read(&page->commit) - read);
+		       local_read(&bpage->commit) - read);
 
 		/* consume what was read */
 		cpu_buffer->reader_page += read;
 
 	} else {
 		/* swap the pages */
-		rb_init_page(page);
-		page = cpu_buffer->reader_page->page;
+		rb_init_page(bpage);
+		bpage = cpu_buffer->reader_page->page;
 		cpu_buffer->reader_page->page = *data_page;
 		cpu_buffer->reader_page->read = 0;
-		*data_page = page;
+		*data_page = bpage;
 	}
 	ret = 1;
 
 	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, page);
+	rb_remove_entries(cpu_buffer, bpage);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
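As a closing illustration, here is a minimal sketch of how a caller might drive the two read-page helpers touched by the final hunks. It assumes only what the diff shows: ring_buffer_alloc_read_page() hands back a page obtained with __get_free_page(), and ring_buffer_read_page() fills or swaps that page for a given CPU, returning 1 when data was read. process_page() is a hypothetical consumer, and the exact parameter order of ring_buffer_read_page() is not fully visible above, so treat this as a sketch rather than the definitive calling sequence:

	void *rpage;
	int ret;

	/* Allocate a data page the ring buffer can copy into or swap with. */
	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return -ENOMEM;

	/* Pull readable data for this CPU; on success the buffer may have
	 * swapped its reader page with rpage instead of copying. */
	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
	if (ret)
		process_page(rpage);		/* hypothetical consumer */

	/* The page came from __get_free_page(), so free_page() releases it;
	 * a dedicated free helper, if the API provides one, would go here. */
	free_page((unsigned long)rpage);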