Diffstat (limited to 'kernel/trace/ring_buffer.c'):

 kernel/trace/ring_buffer.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0287f9f52f5a..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
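
The new slab.h include appears to follow the tree-wide cleanup that stopped percpu.h from pulling slab.h in indirectly: ring_buffer.c calls kmalloc()/kfree(), which are declared in slab.h, so the file must now include that header itself.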
@@ -207,6 +208,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT	0
+# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT	1
+# define RB_ARCH_ALIGNMENT		8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
@@ -1201,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1229,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1238,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1547,7 +1558,7 @@ rb_update_event(struct ring_buffer_event *event,
 
 	case 0:
 		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
+		if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 			event->array[0] = length;
 		else
 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
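
With RB_FORCE_8BYTE_ALIGNMENT set, the length can no longer be packed into the 5-bit type_len field (which counts 4-byte units): it is always stored in the 32-bit array[0] word instead, so the payload begins a fixed 8 bytes after the start of the event and stays 8-byte aligned. The worked example after the next hunk shows the resulting sizes.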
@@ -1722,11 +1733,11 @@ static unsigned rb_calculate_event_length(unsigned length)
 	if (!length)
 		length = 1;
 
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	return length;
 }
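
A user-space sketch of the updated rb_calculate_event_length(), with the kernel constants inlined (RB_EVNT_HDR_SIZE is 4 and RINGBUF_TYPE_DATA_TYPE_LEN_MAX is 28 in the kernel source; treat the exact values as assumptions of this sketch):

    #include <stdio.h>

    #define RB_ALIGNMENT		4U
    #define RB_EVNT_HDR_SIZE	4U	/* type_len + time_delta bitfields */
    #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * 28U)	/* 112 bytes */
    #define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))

    static unsigned calc(unsigned length, int force_8byte, unsigned arch_align)
    {
    	if (!length)
    		length = 1;
    	if (length > RB_MAX_SMALL_DATA || force_8byte)
    		length += 4;		/* sizeof(event.array[0]) */
    	length += RB_EVNT_HDR_SIZE;
    	return ALIGN(length, arch_align);
    }

    int main(void)
    {
    	/* 12-byte payload, normal arch: header only, 4-byte aligned */
    	printf("%u\n", calc(12, 0, 4U));	/* 16 */
    	/* same payload with forced 8-byte alignment: extra array[0] word */
    	printf("%u\n", calc(12, 1, 8U));	/* 24 */
    	return 0;
    }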
@@ -2233,12 +2244,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2470,11 +2481,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
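
ring_buffer_write() gets the identical treatment: the record_disabled test moves below ftrace_preempt_disable(), and its failure path jumps to the function's existing out label rather than returning directly, so the caller still sees -EBUSY but preemption is re-enabled first.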
@@ -2542,7 +2553,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2578,7 +2589,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {