about summary refs log tree commit diff stats
path: root/kernel/trace
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c  32
-rw-r--r--  kernel/trace/trace_selftest.c  1
2 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3271916ff033..0567f51bbea4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -249,24 +249,32 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
249 tracing_record_cmdline(current); 249 tracing_record_cmdline(current);
250} 250}
251 251
252#define CHECK_COND(cond) \
253 if (unlikely(cond)) { \
254 tracing_disabled = 1; \
255 WARN_ON(1); \
256 return -1; \
257 }
258
252/** 259/**
253 * check_pages - integrity check of trace buffers 260 * check_pages - integrity check of trace buffers
254 * 261 *
255 * As a safty measure we check to make sure the data pages have not 262 * As a safty measure we check to make sure the data pages have not
256 * been corrupted. TODO: configure to disable this because it adds 263 * been corrupted.
257 * a bit of overhead.
258 */ 264 */
259void check_pages(struct trace_array_cpu *data) 265int check_pages(struct trace_array_cpu *data)
260{ 266{
261 struct page *page, *tmp; 267 struct page *page, *tmp;
262 268
263 BUG_ON(data->trace_pages.next->prev != &data->trace_pages); 269 CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
264 BUG_ON(data->trace_pages.prev->next != &data->trace_pages); 270 CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
265 271
266 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) { 272 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
267 BUG_ON(page->lru.next->prev != &page->lru); 273 CHECK_COND(page->lru.next->prev != &page->lru);
268 BUG_ON(page->lru.prev->next != &page->lru); 274 CHECK_COND(page->lru.prev->next != &page->lru);
269 } 275 }
276
277 return 0;
270} 278}
271 279
272/** 280/**
@@ -280,7 +288,6 @@ void *head_page(struct trace_array_cpu *data)
280{ 288{
281 struct page *page; 289 struct page *page;
282 290
283 check_pages(data);
284 if (list_empty(&data->trace_pages)) 291 if (list_empty(&data->trace_pages))
285 return NULL; 292 return NULL;
286 293
@@ -2566,7 +2573,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2566{ 2573{
2567 unsigned long val; 2574 unsigned long val;
2568 char buf[64]; 2575 char buf[64];
2569 int ret; 2576 int i, ret;
2570 2577
2571 if (cnt >= sizeof(buf)) 2578 if (cnt >= sizeof(buf))
2572 return -EINVAL; 2579 return -EINVAL;
@@ -2635,8 +2642,15 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2635 trace_free_page(); 2642 trace_free_page();
2636 } 2643 }
2637 2644
2645 /* check integrity */
2646 for_each_tracing_cpu(i)
2647 check_pages(global_trace.data[i]);
2648
2638 filp->f_pos += cnt; 2649 filp->f_pos += cnt;
2639 2650
2651 /* If check pages failed, return ENOMEM */
2652 if (tracing_disabled)
2653 cnt = -ENOMEM;
2640 out: 2654 out:
2641 max_tr.entries = global_trace.entries; 2655 max_tr.entries = global_trace.entries;
2642 mutex_unlock(&trace_types_lock); 2656 mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 3877dd9102f1..18c5423bc977 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -28,6 +28,7 @@ trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
28 page = list_entry(data->trace_pages.next, struct page, lru); 28 page = list_entry(data->trace_pages.next, struct page, lru);
29 entries = page_address(page); 29 entries = page_address(page);
30 30
31 check_pages(data);
31 if (head_page(data) != entries) 32 if (head_page(data) != entries)
32 goto failed; 33 goto failed;
33 34