aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorVaibhav Nagarnaik <vnagarnaik@google.com>2012-05-18 16:29:51 -0400
committerSteven Rostedt <rostedt@goodmis.org>2012-05-19 08:28:50 -0400
commit05fdd70d2fe1e34d8b80ec56d6e3272d9293653e (patch)
treeeb9165b01bfa96341edcdf15acfa358bbc38268f /kernel/trace
parentb732d439cb43336cd6d7e804ecb2c81193ef63b0 (diff)
ring-buffer: Merge separate resize loops
There are 2 separate loops to resize cpu buffers that are online and offline. Merge them to make the code look better. Also change the name from update_completion to update_done to allow shorter lines.

Link: http://lkml.kernel.org/r/1337372991-14783-1-git-send-email-vnagarnaik@google.com
Cc: Laurent Chavey <chavey@google.com>
Cc: Justin Teravest <teravest@google.com>
Cc: David Sharp <dhsharp@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/ring_buffer.c41
1 file changed, 15 insertions, 26 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 68388f876d43..6420cda62336 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -473,7 +473,7 @@ struct ring_buffer_per_cpu {
473 int nr_pages_to_update; 473 int nr_pages_to_update;
474 struct list_head new_pages; /* new pages to add */ 474 struct list_head new_pages; /* new pages to add */
475 struct work_struct update_pages_work; 475 struct work_struct update_pages_work;
476 struct completion update_completion; 476 struct completion update_done;
477}; 477};
478 478
479struct ring_buffer { 479struct ring_buffer {
@@ -1058,7 +1058,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1058 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1058 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1059 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1059 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1060 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 1060 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1061 init_completion(&cpu_buffer->update_completion); 1061 init_completion(&cpu_buffer->update_done);
1062 1062
1063 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1063 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1064 GFP_KERNEL, cpu_to_node(cpu)); 1064 GFP_KERNEL, cpu_to_node(cpu));
@@ -1461,7 +1461,7 @@ static void update_pages_handler(struct work_struct *work)
1461 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 1461 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1462 struct ring_buffer_per_cpu, update_pages_work); 1462 struct ring_buffer_per_cpu, update_pages_work);
1463 rb_update_pages(cpu_buffer); 1463 rb_update_pages(cpu_buffer);
1464 complete(&cpu_buffer->update_completion); 1464 complete(&cpu_buffer->update_done);
1465} 1465}
1466 1466
1467/** 1467/**
@@ -1534,39 +1534,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1534 get_online_cpus(); 1534 get_online_cpus();
1535 /* 1535 /*
1536 * Fire off all the required work handlers 1536 * Fire off all the required work handlers
1537 * Look out for offline CPUs 1537 * We can't schedule on offline CPUs, but it's not necessary
1538 */
1539 for_each_buffer_cpu(buffer, cpu) {
1540 cpu_buffer = buffer->buffers[cpu];
1541 if (!cpu_buffer->nr_pages_to_update ||
1542 !cpu_online(cpu))
1543 continue;
1544
1545 schedule_work_on(cpu, &cpu_buffer->update_pages_work);
1546 }
1547 /*
1548 * This loop is for the CPUs that are not online.
1549 * We can't schedule anything on them, but it's not necessary
1550 * since we can change their buffer sizes without any race. 1538 * since we can change their buffer sizes without any race.
1551 */ 1539 */
1552 for_each_buffer_cpu(buffer, cpu) { 1540 for_each_buffer_cpu(buffer, cpu) {
1553 cpu_buffer = buffer->buffers[cpu]; 1541 cpu_buffer = buffer->buffers[cpu];
1554 if (!cpu_buffer->nr_pages_to_update || 1542 if (!cpu_buffer->nr_pages_to_update)
1555 cpu_online(cpu))
1556 continue; 1543 continue;
1557 1544
1558 rb_update_pages(cpu_buffer); 1545 if (cpu_online(cpu))
1546 schedule_work_on(cpu,
1547 &cpu_buffer->update_pages_work);
1548 else
1549 rb_update_pages(cpu_buffer);
1559 } 1550 }
1560 1551
1561 /* wait for all the updates to complete */ 1552 /* wait for all the updates to complete */
1562 for_each_buffer_cpu(buffer, cpu) { 1553 for_each_buffer_cpu(buffer, cpu) {
1563 cpu_buffer = buffer->buffers[cpu]; 1554 cpu_buffer = buffer->buffers[cpu];
1564 if (!cpu_buffer->nr_pages_to_update || 1555 if (!cpu_buffer->nr_pages_to_update)
1565 !cpu_online(cpu))
1566 continue; 1556 continue;
1567 1557
1568 wait_for_completion(&cpu_buffer->update_completion); 1558 if (cpu_online(cpu))
1569 /* reset this value */ 1559 wait_for_completion(&cpu_buffer->update_done);
1570 cpu_buffer->nr_pages_to_update = 0; 1560 cpu_buffer->nr_pages_to_update = 0;
1571 } 1561 }
1572 1562
@@ -1593,13 +1583,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1593 if (cpu_online(cpu_id)) { 1583 if (cpu_online(cpu_id)) {
1594 schedule_work_on(cpu_id, 1584 schedule_work_on(cpu_id,
1595 &cpu_buffer->update_pages_work); 1585 &cpu_buffer->update_pages_work);
1596 wait_for_completion(&cpu_buffer->update_completion); 1586 wait_for_completion(&cpu_buffer->update_done);
1597 } else 1587 } else
1598 rb_update_pages(cpu_buffer); 1588 rb_update_pages(cpu_buffer);
1599 1589
1600 put_online_cpus();
1601 /* reset this value */
1602 cpu_buffer->nr_pages_to_update = 0; 1590 cpu_buffer->nr_pages_to_update = 0;
1591 put_online_cpus();
1603 } 1592 }
1604 1593
1605 out: 1594 out: