Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  |  31
1 file changed, 20 insertions, 11 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d290718..d1187ef20caf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -206,6 +207,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT 0
+# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT 1
+# define RB_ARCH_ALIGNMENT 8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
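The new macro pair encodes a portability rule: a 64-bit kernel may not be able to access an 8-byte value (such as a timestamp word) sitting at a 4-byte-aligned offset. On 32-bit kernels, and on 64-bit architectures that select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS (x86_64, for instance), the first branch keeps the existing 4-byte RB_ALIGNMENT; a 64-bit architecture without that option takes the #else branch, and every event is then padded out to an 8-byte boundary (sparc64 would be a plausible example, though naming it is an assumption, not something this hunk shows). A sketch after the rb_calculate_event_length() hunk below works through the size effect.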
@@ -1546,7 +1555,7 @@ rb_update_event(struct ring_buffer_event *event,
 
 	case 0:
 		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
+		if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 			event->array[0] = length;
 		else
 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1721,11 +1730,11 @@ static unsigned rb_calculate_event_length(unsigned length)
 	if (!length)
 		length = 1;
 
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ARCH_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	return length;
 }
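To see what the new condition costs, here is a minimal userspace sketch of rb_calculate_event_length(). The constants (RB_ALIGNMENT = 4U, a 4-byte event header, a type_len maximum of 28) mirror the in-tree definitions but are assumptions, since this diff only shows their use:

/* rb_calc_demo.c -- minimal userspace sketch, NOT kernel code.
 * Models rb_calculate_event_length() under both alignment modes. */
#include <stdint.h>
#include <stdio.h>

#define RB_ALIGNMENT      4U
#define RB_EVNT_HDR_SIZE  4U                    /* one 32-bit header word */
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * 28U)  /* RINGBUF_TYPE_DATA_TYPE_LEN_MAX */
#define ALIGN(x, a)       (((x) + ((a) - 1)) & ~((a) - 1))

static unsigned int event_length(unsigned int length, int force_8byte)
{
	unsigned int arch_align = force_8byte ? 8U : RB_ALIGNMENT;

	if (!length)
		length = 1;
	if (length > RB_MAX_SMALL_DATA || force_8byte)
		length += sizeof(uint32_t);     /* length spills into array[0] */
	length += RB_EVNT_HDR_SIZE;
	return ALIGN(length, arch_align);
}

int main(void)
{
	static const unsigned int payloads[] = { 1, 10, 24, 200 };

	for (size_t i = 0; i < sizeof(payloads) / sizeof(payloads[0]); i++)
		printf("payload %3u -> %3u bytes (4-byte align) / %3u bytes (forced 8)\n",
		       payloads[i],
		       event_length(payloads[i], 0),
		       event_length(payloads[i], 1));
	return 0;
}

Worked through by hand for a 10-byte payload: with 4-byte alignment it stays a small event, so the result is ALIGN(10 + 4, 4) = 16; with RB_FORCE_8BYTE_ALIGNMENT the length always moves into event->array[0], giving ALIGN(10 + 4 + 4, 8) = 24.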
@@ -2232,12 +2241,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2469,11 +2478,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
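Both hunks above make the same move: the record_disabled test now happens after ftrace_preempt_disable() rather than before it. As far as the code movement itself shows, the point is ordering: a writer that has passed the check is guaranteed to still be inside the preempt-disabled region, so code that disables recording and then waits for every CPU to schedule (synchronize_sched() style) cannot miss an in-flight writer. A hypothetical userspace model of the writer side, with stdatomic stand-ins for the kernel primitives (the names here are inventions for the sketch, not the kernel API):

/* writer_model.c -- hypothetical model, not the kernel API.
 * Shows why the disabled check belongs inside the preempt-off region. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int record_disabled;

/* Stand-ins for preempt_disable()/preempt_enable(); in the kernel,
 * a resetter can wait for all CPUs to leave such regions. */
static void preempt_off(void) { }
static void preempt_on(void)  { }

static bool try_write(void)
{
	preempt_off();

	/*
	 * Check *inside* the region: once a writer is past this load it
	 * is still preempt-off, so "set record_disabled, then wait for
	 * a grace period" on the reset side cannot miss it. With the
	 * old order, a writer could pass the check, get preempted, and
	 * later write into a buffer that was reset in the meantime.
	 */
	if (atomic_load(&record_disabled)) {
		preempt_on();
		return false;
	}

	/* ... reserve space and copy the event payload here ... */

	preempt_on();
	return true;
}

int main(void)
{
	printf("write allowed: %d\n", try_write());
	atomic_fetch_add(&record_disabled, 1);
	printf("write allowed: %d\n", try_write());
	return 0;
}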
@@ -2541,7 +2550,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2577,7 +2586,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
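The two spelling fixes restate a real invariant: record_disabled is a nesting counter, not a flag, so N disables need N enables before writing resumes. A toy model of that contract (function names are mine, not the kernel's):

/* nesting_demo.c -- minimal model of the disable/enable nesting rule. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int record_disabled;

static void record_disable(void) { atomic_fetch_add(&record_disabled, 1); }
static void record_enable(void)  { atomic_fetch_sub(&record_disabled, 1); }
static int  recording(void)      { return atomic_load(&record_disabled) == 0; }

int main(void)
{
	record_disable();
	record_disable();
	record_enable();
	printf("after 2 disables + 1 enable: %s\n",
	       recording() ? "recording" : "still disabled");
	record_enable();
	printf("after the second enable:     %s\n",
	       recording() ? "recording" : "still disabled");
	return 0;
}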