Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/Kconfig			|  16
-rw-r--r--	kernel/trace/Makefile			|   1
-rw-r--r--	kernel/trace/ring_buffer_benchmark.c	| 379
3 files changed, 396 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 450d3c2cfbd2..50f62a296e1d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -471,6 +471,22 @@ config MMIOTRACE_TEST
 
 	  Say N, unless you absolutely know what you are doing.
 
+config RING_BUFFER_BENCHMARK
+	tristate "Ring buffer benchmark stress tester"
+	depends on RING_BUFFER
+	help
+	  This option creates a test to stress the ring buffer and benchmark it.
+	  It creates its own ring buffer such that it will not interfere with
+	  any other users of the ring buffer (such as ftrace). It then creates
+	  a producer and consumer that will run for 10 seconds and sleep for
+	  10 seconds. Each interval it will print out the number of events
+	  it recorded and give a rough estimate of how long each iteration took.
+
+	  It does not disable interrupts or raise its priority, so it may be
+	  affected by processes that are running.
+
+	  If unsure, say N.
+
 endif # FTRACE
 
 endif # TRACING_SUPPORT
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index fb9d7f964898..7c34cbfff96e 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -17,6 +17,7 @@ endif
 
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
+obj-$(CONFIG_RING_BUFFER_BENCHMARK) += ring_buffer_benchmark.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_clock.o
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
new file mode 100644
index 000000000000..747244acb8fd
--- /dev/null
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -0,0 +1,379 @@
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>

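/*
 * Layout of a raw ring buffer page as handed out by
 * ring_buffer_read_page(): a timestamp, a commit counter giving the
 * number of data bytes used, then the event data itself. The data
 * size assumes a 4096-byte page with an 8-byte local_t.
 */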
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int read_events;

static int kill_test;

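/*
 * Flag a fatal inconsistency: mark the test as failed and emit a single
 * warning backtrace. Both threads poll kill_test and wind down once set.
 */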
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

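	/*
	 * ring_buffer_consume() pops the next event off this CPU's buffer,
	 * or returns NULL when the buffer is empty. The producer writes its
	 * CPU id as the payload, so anything else means corruption.
	 */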
	event = ring_buffer_consume(buffer, cpu, &ts);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

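	/*
	 * Swap an entire page out of the ring buffer at once and walk its
	 * raw events by hand; 'commit' is how many data bytes the writer
	 * actually filled in.
	 */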
	bpage = ring_buffer_alloc_read_page(buffer);
	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		commit = local_read(&rpage->commit);
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
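			/*
			 * Decode the event header by hand: a time extend is a
			 * fixed 8 bytes, type_len 0 means the length lives in
			 * array[0], and any other value encodes the length as
			 * (type_len + 1) * 4 bytes. inc stays -1 if no case
			 * set a valid size.
			 */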
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* We don't expect any padding */
				KILL_TEST();
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0];
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

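/*
 * Drain the buffer until the producer signals reader_finish, alternating
 * between the per-event and per-page read paths on successive runs.
 */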
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

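		/*
		 * Nothing left to read for now: sleep until the producer's
		 * periodic wake_up_process() call, or until it flags
		 * reader_finish.
		 */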
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}

static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	pr_info("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;

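		/*
		 * Reserve a 10-byte event, stamp the payload with this
		 * CPU's id, and commit it; a failed reserve counts as
		 * a miss.
		 */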
		event = ring_buffer_lock_reserve(buffer, 10);
		if (!event) {
			missed++;
		} else {
			hit++;
			entry = ring_buffer_event_data(event);
			*entry = smp_processor_id();
			ring_buffer_unlock_commit(buffer, event);
		}
		do_gettimeofday(&end_tv);

		if (consumer && !(++cnt % wakeup_interval))
			wake_up_process(consumer);

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	pr_info("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

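	/*
	 * Total elapsed time in microseconds; the usec delta may be
	 * negative, hence the signed arithmetic before adding it in.
	 */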
	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= 1000000;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test)
		pr_info("ERROR!\n");
	pr_info("Time: %lld (usecs)\n", time);
	pr_info("Overruns: %lld\n", overruns);
	if (disable_reader)
		pr_info("Read: (reader disabled)\n");
	else
		pr_info("Read: %ld (by %s)\n", read,
			read_events ? "events" : "pages");
	pr_info("Entries: %lld\n", entries);
	pr_info("Total: %lld\n", entries + overruns + read);
	pr_info("Missed: %ld\n", missed);
	pr_info("Hit: %ld\n", hit);

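	/*
	 * Convert the elapsed time to milliseconds, then derive the write
	 * rate and the average cost of one entry in nanoseconds.
	 */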
	do_div(time, 1000);
	if (time)
		hit /= (long)time;
	else
		pr_info("TIME IS ZERO??\n");

	pr_info("Entries per millisec: %ld\n", hit);

	if (hit) {
		avg = 1000000 / hit;
		pr_info("%ld ns per entry\n", avg);
	}
}

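/*
 * On failure, keep the current thread alive until kthread_stop() is
 * called, so module unload can still reap it cleanly.
 */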
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

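/*
 * Consumer loop: check in with the producer via read_start, drain the
 * buffer, then sleep until the next 10-second run begins.
 */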
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}

static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

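		/*
		 * Wake the consumer and wait for it to check in on
		 * read_start before hammering the buffer.
		 */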
		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		pr_info("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

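	/*
	 * The consumer was only created, not started: the producer thread
	 * wakes it at the top of each run. kthread_run() below starts the
	 * producer immediately.
	 */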
	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);
	if (IS_ERR(producer))
		goto out_kill;

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");