Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	38
1 file changed, 8 insertions, 30 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7f6059c5aa94..c3d3cd9c2a53 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2234,8 +2234,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
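The first hunk drops the per-cpu rb_need_resched flag; the hunks below remove the ftrace_preempt_disable()/ftrace_preempt_enable() pair that used it. For context, those helpers lived in kernel/trace/trace.h and looked roughly like this (a reconstruction from that era of the tree, not a verbatim quote):

static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* remember whether a reschedule was already pending */
	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* avoid re-entering the scheduler from the tracer */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

The point of the dance: if NEED_RESCHED was already set when the tracer disabled preemption, re-enabling with a resched check could recurse into the scheduler while the scheduler itself was being traced.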
@@ -2256,13 +2254,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out_nocheck;
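The replacement is the plain notrace pair, which adjusts preempt_count without passing through any traceable function, so the saved resched value (and hence the local variable) disappears. Approximately, per include/linux/preempt.h of this era (again a reconstruction, not a quote):

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)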
@@ -2287,21 +2285,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (!event)
 		goto out;
 
-	/*
-	 * Need to store resched state on this cpu.
-	 * Only the first needs to.
-	 */
-
-	if (preempt_count() == 1)
-		per_cpu(rb_need_resched, cpu) = resched;
-
 	return event;
 
  out:
 	trace_recursive_unlock();
 
  out_nocheck:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
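With this hunk the reserve path brackets its work with a single preempt_disable_notrace()/preempt_enable_notrace() pair and keeps no per-event state. For orientation, a minimal producer-side usage sketch of the reserve/commit API (assumed caller code, not part of this patch):

#include <linux/ring_buffer.h>

static void record_stamp(struct ring_buffer *buffer, u64 stamp)
{
	struct ring_buffer_event *event;
	u64 *body;

	/* NULL if recording is disabled, the buffer is full,
	 * or the recursion check trips */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = stamp;

	ring_buffer_unlock_commit(buffer, event);
}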
@@ -2347,13 +2337,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return 0;
 }
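The deleted comment and preempt_count() == 1 test implemented a nesting rule: reserve/commit pairs can nest (for instance, an interrupt tracing on top of an already-tracing task), and only the outermost level saved and restored the resched state. A commented sketch of the scenario the old code had to get right (hypothetical call chain, for illustration only):

/*
 * task context:
 *   ring_buffer_lock_reserve()      preempt_count() 0 -> 1, saves resched
 *     <interrupt arrives>
 *       ring_buffer_lock_reserve()  preempt_count() > 1, skips the save
 *       ring_buffer_unlock_commit() preempt_count() > 1, plain enable
 *     <interrupt returns>
 *   ring_buffer_unlock_commit()     preempt_count() == 1, restores resched
 */

With the symmetric notrace pair, each level simply undoes its own disable, so the per-cpu slot and the count check can go away.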
@@ -2461,13 +2445,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
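ring_buffer_discard_commit() gets the identical simplification. Its role, sketched as assumed caller code (my_filter_drops() is hypothetical): a reserved event may be thrown away instead of committed, for example when a filter rejects it after the data is filled in.

static void record_filtered(struct ring_buffer *buffer, u64 stamp)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = stamp;

	if (my_filter_drops(stamp))
		ring_buffer_discard_commit(buffer, event);	/* drop it */
	else
		ring_buffer_unlock_commit(buffer, event);	/* keep it */
}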
@@ -2493,12 +2471,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	void *body;
 	int ret = -EBUSY;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out;
@@ -2528,7 +2506,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return ret;
 }
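ring_buffer_write() is the one-shot variant that reserves, copies, and commits internally, so it ends up with the same single disable/enable bracket. A minimal usage sketch (assumed caller code; struct my_payload is hypothetical):

struct my_payload {
	u64	ts;
	int	value;
};

static int record_sample(struct ring_buffer *buffer, int value)
{
	struct my_payload p = {
		.ts	= 0,	/* fill in a real timestamp here */
		.value	= value,
	};

	/* 0 on success, -EBUSY if recording is off or the write fails */
	return ring_buffer_write(buffer, sizeof(p), &p);
}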