author    Steven Rostedt <rostedt@goodmis.org>    2008-09-29 23:02:41 -0400
committer Ingo Molnar <mingo@elte.hu>             2008-10-14 04:38:57 -0400
commit    3928a8a2d98081d1bc3c0a84a2d70e29b90ecf1c
tree      5c1fd6fd721e2e9a5c47facfd50fbc011b3db53a  /kernel/trace/trace_selftest.c
parent    ed56829cb3195de499f97fa6108fe9134319bae6
ftrace: make work with new ring buffer
This patch ports ftrace over to the new ring buffer.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--   kernel/trace/trace_selftest.c   60
1 file changed, 10 insertions(+), 50 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 5ebd4b135498..09cf230d7eca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -18,58 +18,20 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	return 0;
 }
 
-static int
-trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
+static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
-	struct trace_entry *entries;
-	struct page *page;
-	int idx = 0;
-	int i;
+	struct ring_buffer_event *event;
+	struct trace_entry *entry;
 
-	BUG_ON(list_empty(&data->trace_pages));
-	page = list_entry(data->trace_pages.next, struct page, lru);
-	entries = page_address(page);
+	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+		entry = ring_buffer_event_data(event);
 
-	check_pages(data);
-	if (head_page(data) != entries)
-		goto failed;
-
-	/*
-	 * The starting trace buffer always has valid elements,
-	 * if any element exists.
-	 */
-	entries = head_page(data);
-
-	for (i = 0; i < tr->entries; i++) {
-
-		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
+		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
-				entries[idx].type);
+				entry->type);
 			goto failed;
 		}
-
-		idx++;
-		if (idx >= ENTRIES_PER_PAGE) {
-			page = virt_to_page(entries);
-			if (page->lru.next == &data->trace_pages) {
-				if (i != tr->entries - 1) {
-					printk(KERN_CONT ".. entries buffer mismatch");
-					goto failed;
-				}
-			} else {
-				page = list_entry(page->lru.next, struct page, lru);
-				entries = page_address(page);
-			}
-			idx = 0;
-		}
 	}
-
-	page = virt_to_page(entries);
-	if (page->lru.next != &data->trace_pages) {
-		printk(KERN_CONT ".. too many entries");
-		goto failed;
-	}
-
 	return 0;
 
  failed:
@@ -91,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	/* Don't allow flipping of max traces now */
 	raw_local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
-	for_each_possible_cpu(cpu) {
-		if (!head_page(tr->data[cpu]))
-			continue;
 
-		cnt += tr->data[cpu]->trace_idx;
+	cnt = ring_buffer_entries(tr->buffer);
 
-		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
+	for_each_possible_cpu(cpu) {
+		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
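
For context, here is a minimal sketch (not part of the patch) of the consumer-side pattern the reworked selftest relies on: ring_buffer_entries() to get the total count, then ring_buffer_consume() and ring_buffer_event_data() to drain and validate each per-cpu buffer. The wrapper name check_all_cpus() is made up for illustration; the ring buffer calls are the ones the patch itself uses.

/*
 * Hypothetical helper, for illustration only: count everything in the
 * trace ring buffer, then destructively consume each CPU's events and
 * validate the payloads.
 */
static int check_all_cpus(struct trace_array *tr, unsigned long *count)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	int cpu;

	/* total number of entries across all per-cpu buffers */
	*count = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		/* NULL timestamp pointer: only the payload matters here */
		while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
			entry = ring_buffer_event_data(event);
			if (!trace_valid_entry(entry))
				return -1;	/* corrupted record */
		}
	}
	return 0;
}

Because ring_buffer_consume() reads destructively, each event is seen at most once, which is why the selftest counts with ring_buffer_entries() before walking the buffers.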