aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/ring_buffer.c
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2015-05-28 13:21:34 -0400
committerSteven Rostedt <rostedt@goodmis.org>2015-05-28 16:47:39 -0400
commit3c6296f716ebef704b76070d90567ab4faa8462c (patch)
treefc9a587239395d5dc882c8f17756ba6aa29ba838 /kernel/trace/ring_buffer.c
parent289a5a25c5d0949e4d9b9ca4f334b6af5a83e090 (diff)
ring-buffer: Remove useless unused tracing_off_permanent()
The tracing_off_permanent() call is a way to disable all ring buffers. Nothing uses it and nothing should use it, as tracing_off() and friends are better, as they disable only the ring buffers related to tracing. The tracing_off_permanent() call even disabled non-tracing ring buffers. This is a bit drastic, and was added to handle NMIs doing outputs that could corrupt the ring buffer when only tracing used them. It is now obsolete and adds a little overhead; it should be removed. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--kernel/trace/ring_buffer.c61
1 files changed, 0 insertions, 61 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e9420fdc7409..0fc5add6423b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -115,63 +115,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
115 * 115 *
116 */ 116 */
117 117
118/*
119 * A fast way to enable or disable all ring buffers is to
120 * call tracing_on or tracing_off. Turning off the ring buffers
121 * prevents all ring buffers from being recorded to.
122 * Turning this switch on, makes it OK to write to the
123 * ring buffer, if the ring buffer is enabled itself.
124 *
125 * There's three layers that must be on in order to write
126 * to the ring buffer.
127 *
128 * 1) This global flag must be set.
129 * 2) The ring buffer must be enabled for recording.
130 * 3) The per cpu buffer must be enabled for recording.
131 *
132 * In case of an anomaly, this global flag has a bit set that
133 * will permantly disable all ring buffers.
134 */
135
136/*
137 * Global flag to disable all recording to ring buffers
138 * This has two bits: ON, DISABLED
139 *
140 * ON DISABLED
141 * ---- ----------
142 * 0 0 : ring buffers are off
143 * 1 0 : ring buffers are on
144 * X 1 : ring buffers are permanently disabled
145 */
146
147enum {
148 RB_BUFFERS_ON_BIT = 0,
149 RB_BUFFERS_DISABLED_BIT = 1,
150};
151
152enum {
153 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
154 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
155};
156
157static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
158
159/* Used for individual buffers (after the counter) */ 118/* Used for individual buffers (after the counter) */
160#define RB_BUFFER_OFF (1 << 20) 119#define RB_BUFFER_OFF (1 << 20)
161 120
162#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 121#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
163 122
164/**
165 * tracing_off_permanent - permanently disable ring buffers
166 *
167 * This function, once called, will disable all ring buffers
168 * permanently.
169 */
170void tracing_off_permanent(void)
171{
172 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
173}
174
175#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 123#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
176#define RB_ALIGNMENT 4U 124#define RB_ALIGNMENT 4U
177#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 125#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2728,9 +2676,6 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2728 struct ring_buffer_event *event; 2676 struct ring_buffer_event *event;
2729 int cpu; 2677 int cpu;
2730 2678
2731 if (ring_buffer_flags != RB_BUFFERS_ON)
2732 return NULL;
2733
2734 /* If we are tracing schedule, we don't want to recurse */ 2679 /* If we are tracing schedule, we don't want to recurse */
2735 preempt_disable_notrace(); 2680 preempt_disable_notrace();
2736 2681
@@ -2992,9 +2937,6 @@ int ring_buffer_write(struct ring_buffer *buffer,
2992 int ret = -EBUSY; 2937 int ret = -EBUSY;
2993 int cpu; 2938 int cpu;
2994 2939
2995 if (ring_buffer_flags != RB_BUFFERS_ON)
2996 return -EBUSY;
2997
2998 preempt_disable_notrace(); 2940 preempt_disable_notrace();
2999 2941
3000 if (atomic_read(&buffer->record_disabled)) 2942 if (atomic_read(&buffer->record_disabled))
@@ -4350,9 +4292,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4350 4292
4351 ret = -EAGAIN; 4293 ret = -EAGAIN;
4352 4294
4353 if (ring_buffer_flags != RB_BUFFERS_ON)
4354 goto out;
4355
4356 if (atomic_read(&buffer_a->record_disabled)) 4295 if (atomic_read(&buffer_a->record_disabled))
4357 goto out; 4296 goto out;
4358 4297