Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 40 ++++++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d29071..41ca394feb2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -14,12 +14,14 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -206,6 +208,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT	0
+# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT	1
+# define RB_ARCH_ALIGNMENT		8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
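Note: the #if above only folds two Kconfig symbols into constants. Restated as a table (this adds nothing beyond the hunk itself):

    CONFIG_64BIT   HAVE_EFFICIENT_UNALIGNED_ACCESS   RB_FORCE_8BYTE_ALIGNMENT   RB_ARCH_ALIGNMENT
         n                    either                            0                 RB_ALIGNMENT
         y                      y                               0                 RB_ALIGNMENT
         y                      n                               1                 8U

Only a 64-bit kernel without cheap unaligned access forces events onto 8-byte boundaries.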
@@ -1200,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1228,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1237,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
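Note: both rb_remove_pages() and rb_insert_pages() take cpu_buffer->reader_lock earlier in the function (the spin_unlock_irq() at the end is its counterpart), so the early returns replaced above would have exited with the lock still held. A minimal userspace sketch of the single-exit pattern these hunks adopt, with a pthread mutex standing in for the kernel spinlock (names illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_items(bool warn_condition)
{
	pthread_mutex_lock(&reader_lock);	/* stands in for spin_lock_irq() */

	if (warn_condition)
		goto out;	/* an early "return" here would leak the lock */

	/* ... unlink and free pages ... */
out:
	pthread_mutex_unlock(&reader_lock);	/* every path releases the lock */
}

int main(void)
{
	remove_items(true);	/* bails out early, lock still released */
	remove_items(false);
	return 0;
}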
@@ -1546,7 +1558,7 @@ rb_update_event(struct ring_buffer_event *event,
 
 	case 0:
 		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
+		if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 			event->array[0] = length;
 		else
 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1721,11 +1733,11 @@ static unsigned rb_calculate_event_length(unsigned length)
 	if (!length)
 		length = 1;
 
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	return length;
 }
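Note: with RB_FORCE_8BYTE_ALIGNMENT set, every event takes the large-data path (explicit length in event->array[0]) and is rounded up via RB_ARCH_ALIGNMENT. A standalone sketch of the sizing math, assuming RB_ALIGNMENT = 4, a 4-byte event header and RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28 (my reading of this file's constants; verify against the source):

#include <stdio.h>

#define RB_ALIGNMENT		4U			/* assumed */
#define RB_EVNT_HDR_SIZE	4U			/* assumed: one 32-bit header word */
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * 28U)	/* assumed LEN_MAX = 28 */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static unsigned int calc_length(unsigned int length, int force_8byte)
{
	unsigned int align = force_8byte ? 8U : RB_ALIGNMENT;

	if (!length)
		length = 1;
	if (length > RB_MAX_SMALL_DATA || force_8byte)
		length += 4U;	/* explicit length word, event->array[0] */
	length += RB_EVNT_HDR_SIZE;
	return ALIGN(length, align);
}

int main(void)
{
	/* 12-byte payload: 16 bytes normally, 24 when 8-byte alignment is forced */
	printf("%u %u\n", calc_length(12, 0), calc_length(12, 1));
	return 0;
}

The overhead (an always-present length word plus 8-byte rounding) is presumably the price of keeping 64-bit fields inside events naturally aligned on machines that cannot read them unaligned.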
@@ -2232,12 +2244,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2469,11 +2481,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
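Note: the same reordering appears in both reserve paths (ring_buffer_lock_reserve() above and ring_buffer_write() here): record_disabled is now read after ftrace_preempt_disable(), and a set flag exits through the label that re-enables preemption rather than returning before it was ever disabled. A rough userspace sketch of the control flow, with stubs for the preempt calls (all names illustrative):

#include <stdatomic.h>
#include <stddef.h>

static atomic_int record_disabled;

static void preempt_disable_stub(void) { /* ftrace_preempt_disable() stand-in */ }
static void preempt_enable_stub(void) { /* ftrace_preempt_enable() stand-in */ }

static void *reserve_event(size_t length)
{
	void *event = NULL;

	(void)length;	/* sketch only; the real path reserves this much space */

	preempt_disable_stub();

	/* checked with preemption "off"; failure unwinds through out: */
	if (atomic_load(&record_disabled))
		goto out;

	/* ... locate this CPU's buffer and reserve space ... */
out:
	preempt_enable_stub();
	return event;
}

int main(void)
{
	atomic_store(&record_disabled, 1);
	(void)reserve_event(16);	/* returns NULL with preemption balanced */
	return 0;
}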
@@ -2541,7 +2553,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2577,7 +2589,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {