 kernel/trace/ring_buffer.c | 266 +++++++++++++++++++++++++++++++++++--------
 kernel/trace/trace.c       |   6 ------
 2 files changed, 216 insertions(+), 56 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 178858492a89..d07c2888396f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
+#include <linux/cpu.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -301,6 +302,10 @@ struct ring_buffer {
 	struct mutex			mutex;
 
 	struct ring_buffer_per_cpu	**buffers;
+
+#ifdef CONFIG_HOTPLUG
+	struct notifier_block		cpu_notify;
+#endif
 };
 
 struct ring_buffer_iter {
@@ -459,6 +464,11 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  */
 extern int ring_buffer_page_too_big(void);
 
+#ifdef CONFIG_HOTPLUG
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu);
+#endif
+
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
@@ -496,7 +506,8 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	cpumask_copy(buffer->cpumask, cpu_possible_mask);
+	get_online_cpus();
+	cpumask_copy(buffer->cpumask, cpu_online_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
@@ -512,6 +523,13 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 			goto fail_free_buffers;
 	}
 
+#ifdef CONFIG_HOTPLUG
+	buffer->cpu_notify.notifier_call = rb_cpu_notify;
+	buffer->cpu_notify.priority = 0;
+	register_cpu_notifier(&buffer->cpu_notify);
+#endif
+
+	put_online_cpus();
 	mutex_init(&buffer->mutex);
 
 	return buffer;
@@ -525,6 +543,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 
  fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
+	put_online_cpus();
 
  fail_free_buffer:
 	kfree(buffer);
@@ -541,9 +560,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
 	int cpu;
 
+	get_online_cpus();
+
+#ifdef CONFIG_HOTPLUG
+	unregister_cpu_notifier(&buffer->cpu_notify);
+#endif
+
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	put_online_cpus();
+
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
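Everything from here on repeats the pattern that the alloc/free paths establish: any section that dereferences buffer->buffers[cpu] is bracketed by get_online_cpus()/put_online_cpus(), so a CPU cannot be unplugged, and its buffer pointer invalidated, while the section runs. A minimal sketch of the idiom follows; do_something() is a stand-in, not a real kernel function. The hard part is converting every early return so the bracket stays balanced; note that the ring_buffer_empty() hunk further down still appears to return 0 directly from inside the bracketed region.

#include <linux/cpu.h>

/* Sketch of the hotplug-exclusion idiom used throughout this patch. */
static void walk_buffers(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();	/* CPU hotplug is held off; may sleep */

	for_each_buffer_cpu(buffer, cpu)
		do_something(buffer->buffers[cpu]);	/* stand-in; pointer is stable here */

	put_online_cpus();	/* every exit path must reach this */
}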
@@ -649,16 +676,15 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		return size;
 
 	mutex_lock(&buffer->mutex);
+	get_online_cpus();
 
 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
 	if (size < buffer_size) {
 
 		/* easy case, just free pages */
-		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
-			mutex_unlock(&buffer->mutex);
-			return -1;
-		}
+		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
+			goto out_fail;
 
 		rm_pages = buffer->pages - nr_pages;
 
@@ -677,10 +703,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	 * add these pages to the cpu_buffers. Otherwise we just free
 	 * them all and return -ENOMEM;
 	 */
-	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
-		mutex_unlock(&buffer->mutex);
-		return -1;
-	}
+	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
+		goto out_fail;
 
 	new_pages = nr_pages - buffer->pages;
 
@@ -705,13 +729,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		rb_insert_pages(cpu_buffer, &pages, new_pages);
 	}
 
-	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
-		mutex_unlock(&buffer->mutex);
-		return -1;
-	}
+	if (RB_WARN_ON(buffer, !list_empty(&pages)))
+		goto out_fail;
 
  out:
 	buffer->pages = nr_pages;
+	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
 	return size;
@@ -721,8 +744,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
+	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
+
+	/*
+	 * Something went totally wrong, and we are too paranoid
+	 * to even clean up the mess.
+	 */
+ out_fail:
+	put_online_cpus();
+	mutex_unlock(&buffer->mutex);
+	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
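Folding the three RB_WARN_ON() failure branches into the single out_fail label is what keeps the new get_online_cpus() balanced on every exit from ring_buffer_resize(). Reduced to a sketch, with the lock, the check, and the work all as placeholders:

static DEFINE_MUTEX(lock);		/* placeholder lock */

static int resize_like_operation(void)
{
	int ret = 0;

	mutex_lock(&lock);
	get_online_cpus();

	if (something_wrong()) {	/* placeholder check */
		ret = -1;
		goto out;		/* single exit keeps the pair balanced */
	}
	/* ... the actual resize work ... */
 out:
	put_online_cpus();
	mutex_unlock(&lock);
	return ret;
}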
@@ -1528,11 +1561,15 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
+	get_online_cpus();
+
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return;
+		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
+ out:
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
@@ -1548,11 +1585,15 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
+	get_online_cpus();
+
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return;
+		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
+ out:
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
@@ -1564,12 +1605,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret = 0;
+
+	get_online_cpus();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return 0;
+		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	return cpu_buffer->entries;
+	ret = cpu_buffer->entries;
+ out:
+	put_online_cpus();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
@@ -1581,12 +1629,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret = 0;
+
+	get_online_cpus();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return 0;
+		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	return cpu_buffer->overrun;
+	ret = cpu_buffer->overrun;
+ out:
+	put_online_cpus();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
@@ -1603,12 +1658,16 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 	unsigned long entries = 0;
 	int cpu;
 
+	get_online_cpus();
+
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 		entries += cpu_buffer->entries;
 	}
 
+	put_online_cpus();
+
 	return entries;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries);
@@ -1626,12 +1685,16 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 	unsigned long overruns = 0;
 	int cpu;
 
+	get_online_cpus();
+
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 		overruns += cpu_buffer->overrun;
 	}
 
+	put_online_cpus();
+
 	return overruns;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
@@ -1663,9 +1726,14 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
  */
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
 
+	if (!iter)
+		return;
+
+	cpu_buffer = iter->cpu_buffer;
+
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -1900,9 +1968,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return NULL;
-
 	cpu_buffer = buffer->buffers[cpu];
 
  again:
@@ -2028,13 +2093,21 @@ struct ring_buffer_event *
 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-	struct ring_buffer_event *event;
+	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
 
+	get_online_cpus();
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		goto out;
+
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_buffer_peek(buffer, cpu, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+	put_online_cpus();
+
 	return event;
 }
 
@@ -2071,24 +2144,31 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-	struct ring_buffer_event *event;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
 
+	/* might be called in atomic */
+	preempt_disable();
+
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return NULL;
+		goto out;
 
+	cpu_buffer = buffer->buffers[cpu];
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
-		goto out;
+		goto out_unlock;
 
 	rb_advance_reader(cpu_buffer);
 
- out:
+ out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+	preempt_enable();
+
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
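ring_buffer_consume() swaps get_online_cpus(), which may sleep, for preempt_disable(), because consumers call it from atomic context: that is what the "might be called in atomic" comment is about. On kernels of this era the substitution is sound, since CPU offline completes through stop_machine(), which cannot run while any CPU has preemption disabled, so the per-cpu buffer cannot be torn down inside the critical section. The same reasoning justifies ftrace_preempt_disable() in the reset paths below. As a sketch, with consume_one_event() standing in for the real work:

	preempt_disable();		/* holds off stop_machine-based offline */

	if (cpumask_test_cpu(cpu, buffer->cpumask)) {
		struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];

		consume_one_event(cpu_buffer);	/* stand-in for the real work */
	}

	preempt_enable();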
@@ -2109,15 +2189,17 @@ struct ring_buffer_iter *
 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	struct ring_buffer_iter *iter;
+	struct ring_buffer_iter *iter = NULL;
 	unsigned long flags;
 
+	get_online_cpus();
+
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return NULL;
+		goto out;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter)
-		return NULL;
+		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
 
@@ -2132,6 +2214,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	__raw_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+	put_online_cpus();
+
 	return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -2224,9 +2309,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
+	int resched;
+
+	/* Can't use get_online_cpus because this can be in atomic */
+	resched = ftrace_preempt_disable();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return;
+		goto out;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
@@ -2237,6 +2326,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	__raw_spin_unlock(&cpu_buffer->lock);
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+	ftrace_preempt_enable(resched);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
@@ -2246,10 +2337,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
  */
 void ring_buffer_reset(struct ring_buffer *buffer)
 {
+	int resched;
 	int cpu;
 
+	/* Can't use get_online_cpus because this can be in atomic */
+	resched = ftrace_preempt_disable();
+
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
+
+	ftrace_preempt_enable(resched);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
@@ -2262,12 +2359,17 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu;
 
+	get_online_cpus();
+
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 		if (!rb_per_cpu_empty(cpu_buffer))
 			return 0;
 	}
+
+	put_online_cpus();
+
 	return 1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -2280,12 +2382,20 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	int ret = 1;
+
+	get_online_cpus();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return 1;
+		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	return rb_per_cpu_empty(cpu_buffer);
+	ret = rb_per_cpu_empty(cpu_buffer);
+
+ out:
+	put_online_cpus();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
@@ -2304,32 +2414,37 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 {
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
+	int ret = -EINVAL;
+
+	get_online_cpus();
 
 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
-		return -EINVAL;
+		goto out;
 
 	/* At least make sure the two buffers are somewhat the same */
 	if (buffer_a->pages != buffer_b->pages)
-		return -EINVAL;
+		goto out;
+
+	ret = -EAGAIN;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
-		return -EAGAIN;
+		goto out;
 
 	if (atomic_read(&buffer_a->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	if (atomic_read(&buffer_b->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	cpu_buffer_a = buffer_a->buffers[cpu];
 	cpu_buffer_b = buffer_b->buffers[cpu];
 
 	if (atomic_read(&cpu_buffer_a->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	if (atomic_read(&cpu_buffer_b->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	/*
 	 * We can't do a synchronize_sched here because this
@@ -2349,7 +2464,11 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	atomic_dec(&cpu_buffer_a->record_disabled);
 	atomic_dec(&cpu_buffer_b->record_disabled);
 
-	return 0;
+	ret = 0;
+ out:
+	put_online_cpus();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
@@ -2464,27 +2583,32 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	u64 save_timestamp;
 	int ret = -1;
 
+	get_online_cpus();
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		goto out;
+
 	/*
 	 * If len is not big enough to hold the page header, then
 	 * we can not copy anything.
 	 */
 	if (len <= BUF_PAGE_HDR_SIZE)
-		return -1;
+		goto out;
 
 	len -= BUF_PAGE_HDR_SIZE;
 
 	if (!data_page)
-		return -1;
+		goto out;
 
 	bpage = *data_page;
 	if (!bpage)
-		return -1;
+		goto out;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
-		goto out;
+		goto out_unlock;
 
 	event = rb_reader_event(cpu_buffer);
 
@@ -2506,7 +2630,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		unsigned int size;
 
 		if (full)
-			goto out;
+			goto out_unlock;
 
 		if (len > (commit - read))
 			len = (commit - read);
@@ -2514,7 +2638,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		size = rb_event_length(event);
 
 		if (len < size)
-			goto out;
+			goto out_unlock;
 
 		/* save the current timestamp, since the user will need it */
 		save_timestamp = cpu_buffer->read_stamp;
@@ -2553,9 +2677,12 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	}
 	ret = read;
 
- out:
+ out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+	put_online_cpus();
+
 	return ret;
 }
 
@@ -2629,3 +2756,42 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+
+#ifdef CONFIG_HOTPLUG
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu)
+{
+	struct ring_buffer *buffer =
+		container_of(self, struct ring_buffer, cpu_notify);
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		if (cpu_isset(cpu, *buffer->cpumask))
+			return NOTIFY_OK;
+
+		buffer->buffers[cpu] =
+			rb_allocate_cpu_buffer(buffer, cpu);
+		if (!buffer->buffers[cpu]) {
+			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
+			     cpu);
+			return NOTIFY_OK;
+		}
+		smp_wmb();
+		cpu_set(cpu, *buffer->cpumask);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		/*
+		 * Do nothing.
+		 * If we were to free the buffer, then the user would
+		 * lose any trace that was in the buffer.
+		 */
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif
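The notifier is the piece that makes online-only allocation safe: CPU_UP_PREPARE allocates a buffer for a CPU before it starts running, and the smp_wmb() orders the buffers[cpu] store ahead of the cpumask bit, so any reader that sees the bit set also sees a valid pointer. Two caveats beyond this patch: the guard almost certainly wants to be CONFIG_HOTPLUG_CPU rather than CONFIG_HOTPLUG (mainline corrected exactly this shortly afterwards), and the notifier API itself was eventually replaced by cpuhp_setup_state(). A stripped-down sketch of the same registration pattern, with illustrative names (my_cpu_notify, my_nb are not from the patch):

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int my_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* allocate per-cpu state for @cpu before it runs,
		 * publish the pointer, smp_wmb(), then set the mask bit */
		pr_debug("cpu %ld coming up\n", cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_cpu_notify,
};

/* Register with register_cpu_notifier(&my_nb) inside the same
 * get_online_cpus() section that snapshots cpu_online_mask, as
 * ring_buffer_alloc() does above; otherwise a CPU coming up in the
 * window is missed by both the mask and the notifier. */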
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e60f4be10d64..14c98f6a47bc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1805,17 +1805,11 @@ __tracing_open(struct inode *inode, struct file *file)
 
 			iter->buffer_iter[cpu] =
 				ring_buffer_read_start(iter->tr->buffer, cpu);
-
-			if (!iter->buffer_iter[cpu])
-				goto fail_buffer;
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
 			ring_buffer_read_start(iter->tr->buffer, cpu);
-
-		if (!iter->buffer_iter[cpu])
-			goto fail;
 	}
 
 	/* TODO stop tracer */
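The trace.c half drops the open-time error handling because a NULL iterator is now an expected state rather than a failure: a CPU that was offline when the buffer was allocated simply has no per-cpu buffer, and ring_buffer_iter_reset() was taught above to tolerate a NULL iter. Callers therefore skip at use time instead of failing at open time, along these lines (a sketch, not the actual trace.c loop):

	for_each_tracing_cpu(cpu) {
		struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

		if (!buf_iter)
			continue;	/* CPU had no buffer when the file was opened */
		/* ... peek at and advance events through buf_iter ... */
	}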