author		Steven Rostedt <srostedt@redhat.com>	2009-03-12 13:13:49 -0400
committer	Steven Rostedt <srostedt@redhat.com>	2009-03-12 21:14:59 -0400
commit		8aabee573dff131a085c63de7667eacd94ba4ccb (patch)
tree		923c02126de3e55148df132a89a7d2151df855b7
parent		59222efe2d184956464abe5b637bc842ff053b93 (diff)
ring-buffer: remove unneeded get_online_cpus
Impact: speed up and remove possible races
The get_online_cpus() calls were added to the ring buffer because the
original design freed a CPU's ring buffer when that CPU was taken
offline. The final design keeps the ring buffer around even when the
CPU is taken offline, so that a user can still read the information
in that ring buffer.

Most of the get_online_cpus() calls are therefore no longer needed,
since the ring buffer can no longer disappear out from under these
use cases.
Reported-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
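
Most hunks below apply the same conversion. For orientation, here is the
before/after of ring_buffer_entries_cpu(), condensed from the hunks
themselves (an illustrative paraphrase of the patch, not the applied diff):

/* Before: CPU hotplug is locked out only so that the per-CPU
 * buffer cannot be freed while it is being read. */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret = 0;

	get_online_cpus();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->entries;
 out:
	put_online_cpus();

	return ret;
}

/* After: the buffer is never freed on hotplug, so the cpumask
 * test alone suffices and the function can return early. */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->entries;

	return ret;
}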
-rw-r--r--	kernel/trace/ring_buffer.c	90
1 file changed, 14 insertions(+), 76 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 035b56c3a6c9..2c36be9fac2e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1561,15 +1561,11 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	get_online_cpus();
-
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
- out:
-	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
@@ -1585,15 +1581,11 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	get_online_cpus();
-
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
- out:
-	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
@@ -1605,17 +1597,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned long ret = 0;
-
-	get_online_cpus();
+	unsigned long ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	ret = cpu_buffer->entries;
- out:
-	put_online_cpus();
 
 	return ret;
 }
@@ -1629,17 +1617,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned long ret = 0;
-
-	get_online_cpus();
+	unsigned long ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	ret = cpu_buffer->overrun;
- out:
-	put_online_cpus();
 
 	return ret;
 }
@@ -1658,16 +1642,12 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 	unsigned long entries = 0;
 	int cpu;
 
-	get_online_cpus();
-
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 		entries += cpu_buffer->entries;
 	}
 
-	put_online_cpus();
-
 	return entries;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries);
@@ -1685,16 +1665,12 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 	unsigned long overruns = 0;
 	int cpu;
 
-	get_online_cpus();
-
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 		overruns += cpu_buffer->overrun;
 	}
 
-	put_online_cpus();
-
 	return overruns;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
@@ -2093,21 +2069,16 @@ struct ring_buffer_event *
 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-	struct ring_buffer_event *event = NULL;
+	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	get_online_cpus();
-
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_buffer_peek(buffer, cpu, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
- out:
-	put_online_cpus();
-
 	return event;
 }
 
@@ -2189,17 +2160,15 @@ struct ring_buffer_iter *
 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	struct ring_buffer_iter *iter = NULL;
+	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	get_online_cpus();
-
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter)
-		goto out;
+		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
 
@@ -2214,9 +2183,6 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	__raw_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
- out:
-	put_online_cpus();
-
 	return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -2309,13 +2275,9 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
-	int resched;
-
-	/* Can't use get_online_cpus because this can be in atomic */
-	resched = ftrace_preempt_disable();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
@@ -2326,8 +2288,6 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	__raw_spin_unlock(&cpu_buffer->lock);
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- out:
-	ftrace_preempt_enable(resched);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
@@ -2337,16 +2297,10 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
  */
 void ring_buffer_reset(struct ring_buffer *buffer)
 {
-	int resched;
 	int cpu;
 
-	/* Can't use get_online_cpus because this can be in atomic */
-	resched = ftrace_preempt_disable();
-
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
-
-	ftrace_preempt_enable(resched);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
@@ -2359,8 +2313,6 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu;
 
-	get_online_cpus();
-
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
@@ -2368,8 +2320,6 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 			return 0;
 	}
 
-	put_online_cpus();
-
 	return 1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -2382,18 +2332,14 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	int ret = 1;
-
-	get_online_cpus();
+	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		goto out;
+		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
 	ret = rb_per_cpu_empty(cpu_buffer);
 
- out:
-	put_online_cpus();
 
 	return ret;
 }
@@ -2416,8 +2362,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 	int ret = -EINVAL;
 
-	get_online_cpus();
-
 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		goto out;
@@ -2466,8 +2410,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	ret = 0;
  out:
-	put_online_cpus();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
@@ -2583,8 +2525,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	u64 save_timestamp;
 	int ret = -1;
 
-	get_online_cpus();
-
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
@@ -2681,8 +2621,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
-	put_online_cpus();
-
 	return ret;
 }
 