Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	135
1 file changed, 45 insertions(+), 90 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9c143739b8d7..a7a055f167c7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -479,9 +479,7 @@ struct ring_buffer {
 
 	struct ring_buffer_per_cpu	**buffers;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	struct notifier_block		cpu_notify;
-#endif
+	struct hlist_node		node;
 	u64				(*clock)(void);
 
 	struct rb_irq_work		irq_work;
@@ -1274,11 +1272,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int rb_cpu_notify(struct notifier_block *self,
-			 unsigned long action, void *hcpu);
-#endif
-
 /**
  * __ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
@@ -1296,6 +1289,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	long nr_pages;
 	int bsize;
 	int cpu;
+	int ret;
 
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1318,17 +1312,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	if (nr_pages < 2)
 		nr_pages = 2;
 
-	/*
-	 * In case of non-hotplug cpu, if the ring-buffer is allocated
-	 * in early initcall, it will not be notified of secondary cpus.
-	 * In that off case, we need to allocate for all possible cpus.
-	 */
-#ifdef CONFIG_HOTPLUG_CPU
-	cpu_notifier_register_begin();
-	cpumask_copy(buffer->cpumask, cpu_online_mask);
-#else
-	cpumask_copy(buffer->cpumask, cpu_possible_mask);
-#endif
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
@@ -1337,19 +1320,15 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	if (!buffer->buffers)
 		goto fail_free_cpumask;
 
-	for_each_buffer_cpu(buffer, cpu) {
-		buffer->buffers[cpu] =
-			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
-		if (!buffer->buffers[cpu])
-			goto fail_free_buffers;
-	}
+	cpu = raw_smp_processor_id();
+	cpumask_set_cpu(cpu, buffer->cpumask);
+	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
+	if (!buffer->buffers[cpu])
+		goto fail_free_buffers;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	buffer->cpu_notify.notifier_call = rb_cpu_notify;
-	buffer->cpu_notify.priority = 0;
-	__register_cpu_notifier(&buffer->cpu_notify);
-	cpu_notifier_register_done();
-#endif
+	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+	if (ret < 0)
+		goto fail_free_buffers;
 
 	mutex_init(&buffer->mutex);
 
@@ -1364,9 +1343,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 
 fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
-#ifdef CONFIG_HOTPLUG_CPU
-	cpu_notifier_register_done();
-#endif
 
 fail_free_buffer:
 	kfree(buffer);
@@ -1383,18 +1359,11 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
 	int cpu;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&buffer->cpu_notify);
-#endif
+	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	cpu_notifier_register_done();
-#endif
-
 	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
 
@@ -4633,62 +4602,48 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int rb_cpu_notify(struct notifier_block *self,
-			 unsigned long action, void *hcpu)
+/*
+ * We only allocate new buffers, never free them if the CPU goes down.
+ * If we were to free the buffer, then the user would lose any trace that was in
+ * the buffer.
+ */
+int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 {
-	struct ring_buffer *buffer =
-		container_of(self, struct ring_buffer, cpu_notify);
-	long cpu = (long)hcpu;
+	struct ring_buffer *buffer;
 	long nr_pages_same;
 	int cpu_i;
 	unsigned long nr_pages;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		if (cpumask_test_cpu(cpu, buffer->cpumask))
-			return NOTIFY_OK;
-
-		nr_pages = 0;
-		nr_pages_same = 1;
-		/* check if all cpu sizes are same */
-		for_each_buffer_cpu(buffer, cpu_i) {
-			/* fill in the size from first enabled cpu */
-			if (nr_pages == 0)
-				nr_pages = buffer->buffers[cpu_i]->nr_pages;
-			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
-				nr_pages_same = 0;
-				break;
-			}
-		}
-		/* allocate minimum pages, user can later expand it */
-		if (!nr_pages_same)
-			nr_pages = 2;
-		buffer->buffers[cpu] =
-			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
-		if (!buffer->buffers[cpu]) {
-			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
-			     cpu);
-			return NOTIFY_OK;
+	buffer = container_of(node, struct ring_buffer, node);
+	if (cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	nr_pages = 0;
+	nr_pages_same = 1;
+	/* check if all cpu sizes are same */
+	for_each_buffer_cpu(buffer, cpu_i) {
+		/* fill in the size from first enabled cpu */
+		if (nr_pages == 0)
+			nr_pages = buffer->buffers[cpu_i]->nr_pages;
+		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
+			nr_pages_same = 0;
+			break;
 		}
-		smp_wmb();
-		cpumask_set_cpu(cpu, buffer->cpumask);
-		break;
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		/*
-		 * Do nothing.
-		 * If we were to free the buffer, then the user would
-		 * lose any trace that was in the buffer.
-		 */
-		break;
-	default:
-		break;
 	}
-	return NOTIFY_OK;
+	/* allocate minimum pages, user can later expand it */
+	if (!nr_pages_same)
+		nr_pages = 2;
+	buffer->buffers[cpu] =
+		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
+	if (!buffer->buffers[cpu]) {
+		WARN(1, "failed to allocate ring buffer on CPU %u\n",
+		     cpu);
+		return -ENOMEM;
+	}
+	smp_wmb();
+	cpumask_set_cpu(cpu, buffer->cpumask);
+	return 0;
 }
-#endif
 
 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
 /*
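
Note on the conversion shown above: each struct ring_buffer now carries an hlist_node and attaches itself to the multi-instance CPUHP_TRACE_RB_PREPARE state, and trace_rb_cpu_prepare() allocates a per-CPU buffer whenever a new CPU is brought up. The one-time registration of that state is not part of this file's diff (the diffstat is limited to ring_buffer.c). The sketch below shows how such a multi-instance prepare state is typically wired up with cpuhp_setup_state_multi(); the init-function name and the state's name string are illustrative assumptions, not taken from this patch.

#include <linux/cpuhotplug.h>

/* Provided by ring_buffer.c in the hunk above. */
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);

/* Hypothetical registration site; in practice this lives outside ring_buffer.c. */
static int example_trace_rb_hotplug_init(void)
{
	/*
	 * Register the per-instance "prepare" callback once.  No teardown
	 * callback is given: per-CPU buffers are deliberately kept when a
	 * CPU goes down, so no recorded trace is lost.
	 */
	return cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				       "trace/RB:prepare",
				       trace_rb_cpu_prepare, NULL);
}

/*
 * Each buffer then hooks itself in, as __ring_buffer_alloc() does above;
 * cpuhp_state_add_instance() also invokes the prepare callback for every
 * CPU that is already online:
 *
 *	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 *
 * and ring_buffer_free() detaches with
 * cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node).
 */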