author     Steven Rostedt <rostedt@goodmis.org>   2008-05-12 15:21:04 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2008-05-23 16:05:14 -0400
commit     3eefae994d9224fb7771a3ddb683868363c23510 (patch)
tree       0c7fe35765b485ff2a155c4ae1189199476a34b3 /kernel/trace/trace.c
parent     6c6c27969a4c6024e6c8838829546c02aaddca18 (diff)
ftrace: limit trace entries
Currently there is no protection against the root user using up all of
memory for trace buffers. If the root user allocates too many entries,
the OOM killer might start killing off all tasks.
This patch adds an algorithm to check the following condition:
pages_requested > (freeable_memory + current_trace_buffer_pages) / 4
If the above condition is met, the allocation fails. This prevents trace
buffers from using more than 1/4 of the freeable memory.
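As a rough worked example (the numbers are illustrative, not taken from the
patch): on a machine with 4 KB pages and about 1 GB of freeable memory
(262144 pages), with no trace pages currently allocated, any request needing
more than (262144 + 0) / 4 = 65536 pages (about 256 MB) is refused with
-ENOMEM. Note that the requested page count is multiplied across all per-CPU
buffers and max_tr before the check, so the user-visible entry count that
hits this limit is correspondingly smaller.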
To determine the freeable_memory, I made determine_dirtyable_memory in
mm/page-writeback.c global.
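The companion change is not part of the kernel/trace/trace.c diff shown
below; presumably it drops the static qualifier from
determine_dirtyable_memory() in mm/page-writeback.c and adds a declaration
to include/linux/writeback.h along the lines of the sketch here, which is
why the diff below adds #include <linux/writeback.h>:

    /* include/linux/writeback.h (assumed shape of the exported prototype) */
    unsigned long determine_dirtyable_memory(void);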
Special thanks goes to Peter Zijlstra for suggesting the above calculation.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82ced406aacf..2824cf48cdca 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -27,6 +27,7 @@
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
 
@@ -51,6 +52,8 @@ static int trace_free_page(void);
 
 static int tracing_disabled = 1;
 
+static unsigned long tracing_pages_allocated;
+
 long
 ns2usecs(cycle_t nsec)
 {
@@ -2591,12 +2594,41 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        }
 
        if (val > global_trace.entries) {
+               long pages_requested;
+               unsigned long freeable_pages;
+
+               /* make sure we have enough memory before mapping */
+               pages_requested =
+                       (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
+
+               /* account for each buffer (and max_tr) */
+               pages_requested *= tracing_nr_buffers * 2;
+
+               /* Check for overflow */
+               if (pages_requested < 0) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
+               freeable_pages = determine_dirtyable_memory();
+
+               /* we only allow to request 1/4 of useable memory */
+               if (pages_requested >
+                   ((freeable_pages + tracing_pages_allocated) / 4)) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
                while (global_trace.entries < val) {
                        if (trace_alloc_page()) {
                                cnt = -ENOMEM;
                                goto out;
                        }
+                       /* double check that we don't go over the known pages */
+                       if (tracing_pages_allocated > pages_requested)
+                               break;
                }
+
        } else {
                /* include the number of entries in val (inc of page entries) */
                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
@@ -2776,6 +2808,7 @@ static int trace_alloc_page(void)
        struct page *page, *tmp;
        LIST_HEAD(pages);
        void *array;
+       unsigned pages_allocated = 0;
        int i;
 
        /* first allocate a page for each CPU */
@@ -2787,6 +2820,7 @@ static int trace_alloc_page(void)
                        goto free_pages;
                }
 
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 
@@ -2798,6 +2832,7 @@
                               "for trace buffer!\n");
                        goto free_pages;
                }
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 #endif
@@ -2819,6 +2854,7 @@
                SetPageLRU(page);
 #endif
        }
+       tracing_pages_allocated += pages_allocated;
        global_trace.entries += ENTRIES_PER_PAGE;
 
        return 0;
@@ -2853,6 +2889,8 @@ static int trace_free_page(void)
                page = list_entry(p, struct page, lru);
                ClearPageLRU(page);
                list_del(&page->lru);
+               tracing_pages_allocated--;
+               tracing_pages_allocated--;
                __free_page(page);
 
                tracing_reset(data);