Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--  kernel/trace/trace_selftest.c  101
1 file changed, 31 insertions(+), 70 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0911b7e073bf..90bc752a7580 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -9,65 +9,29 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_FN:
 	case TRACE_CTX:
 	case TRACE_WAKE:
+	case TRACE_CONT:
 	case TRACE_STACK:
+	case TRACE_PRINT:
 	case TRACE_SPECIAL:
 		return 1;
 	}
 	return 0;
 }
 
-static int
-trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
+static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
-	struct trace_entry *entries;
-	struct page *page;
-	int idx = 0;
-	int i;
+	struct ring_buffer_event *event;
+	struct trace_entry *entry;
 
-	BUG_ON(list_empty(&data->trace_pages));
-	page = list_entry(data->trace_pages.next, struct page, lru);
-	entries = page_address(page);
+	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+		entry = ring_buffer_event_data(event);
 
-	check_pages(data);
-	if (head_page(data) != entries)
-		goto failed;
-
-	/*
-	 * The starting trace buffer always has valid elements,
-	 * if any element exists.
-	 */
-	entries = head_page(data);
-
-	for (i = 0; i < tr->entries; i++) {
-
-		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
+		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
-				entries[idx].type);
+				entry->type);
 			goto failed;
 		}
-
-		idx++;
-		if (idx >= ENTRIES_PER_PAGE) {
-			page = virt_to_page(entries);
-			if (page->lru.next == &data->trace_pages) {
-				if (i != tr->entries - 1) {
-					printk(KERN_CONT ".. entries buffer mismatch");
-					goto failed;
-				}
-			} else {
-				page = list_entry(page->lru.next, struct page, lru);
-				entries = page_address(page);
-			}
-			idx = 0;
-		}
-	}
-
-	page = virt_to_page(entries);
-	if (page->lru.next != &data->trace_pages) {
-		printk(KERN_CONT ".. too many entries");
-		goto failed;
 	}
-
 	return 0;
 
  failed:
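
Note (illustrative, not part of the patch): the hunk above drops the old page-list walk and validates entries with the ring buffer's consuming-read API instead. A minimal sketch of that pattern, using only the calls visible in the hunk: ring_buffer_consume() pops the next event from the given CPU's buffer (the NULL third argument skips the timestamp) and returns NULL once that buffer is drained; ring_buffer_event_data() yields the event payload, which the selftest treats as a struct trace_entry.

	struct ring_buffer_event *event;
	struct trace_entry *entry;

	/* drain one CPU's buffer, checking each consumed entry's type */
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);
		if (!trace_valid_entry(entry))
			goto failed;	/* unknown entry type: the selftest fails */
	}
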
@@ -89,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	/* Don't allow flipping of max traces now */
 	raw_local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
-	for_each_possible_cpu(cpu) {
-		if (!head_page(tr->data[cpu]))
-			continue;
 
-		cnt += tr->data[cpu]->trace_idx;
+	cnt = ring_buffer_entries(tr->buffer);
 
-		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
+	for_each_possible_cpu(cpu) {
+		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
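
Note (illustrative, not part of the patch): per the hunk above, the rewritten trace_test_buffer() no longer sums tr->data[cpu]->trace_idx by hand; it reads the total with ring_buffer_entries() and then validates each CPU. A condensed view of that flow as shown in the diff (the count is taken before the per-cpu loop, since trace_test_buffer_cpu() consumes entries while validating them):

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);	/* drains and checks this CPU */
		if (ret)
			break;
	}
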
@@ -108,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	return ret;
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -120,11 +82,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 					   struct trace_array *tr,
 					   int (*func)(void))
 {
-	unsigned long count;
-	int ret;
 	int save_ftrace_enabled = ftrace_enabled;
 	int save_tracer_enabled = tracer_enabled;
+	unsigned long count;
 	char *func_name;
+	int ret;
 
 	/* The ftrace test PASSED */
 	printk(KERN_CONT "PASSED\n");
@@ -137,13 +99,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	/* passed in by parameter to fool gcc from optimizing */
 	func();
 
-	/* update the records */
-	ret = ftrace_force_update();
-	if (ret) {
-		printk(KERN_CONT ".. ftraced failed .. ");
-		return ret;
-	}
-
 	/*
 	 * Some archs *cough*PowerPC*cough* add charachters to the
 	 * start of the function names. We simply put a '*' to
@@ -157,6 +112,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	/* enable tracing */
 	tr->ctrl = 1;
 	trace->init(tr);
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 
@@ -212,21 +168,14 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 int
 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long count;
-	int ret;
 	int save_ftrace_enabled = ftrace_enabled;
 	int save_tracer_enabled = tracer_enabled;
+	unsigned long count;
+	int ret;
 
 	/* make sure msleep has been recorded */
 	msleep(1);
 
-	/* force the recorded functions to be traced */
-	ret = ftrace_force_update();
-	if (ret) {
-		printk(KERN_CONT ".. ftraced failed .. ");
-		return ret;
-	}
-
 	/* start the tracing */
 	ftrace_enabled = 1;
 	tracer_enabled = 1;
@@ -263,7 +212,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 	return ret;
 }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSOFF_TRACER
 int
@@ -415,6 +364,15 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 }
 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
 
+#ifdef CONFIG_NOP_TRACER
+int
+trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
+{
+	/* What could possibly go wrong? */
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SCHED_TRACER
 static int trace_wakeup_test_thread(void *data)
 {
@@ -486,6 +444,9 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
 	wake_up_process(p);
 
+	/* give a little time to let the thread wake up */
+	msleep(100);
+
 	/* stop the tracing. */
 	tr->ctrl = 0;
 	trace->ctrl_update(tr);