author		Steven Rostedt <srostedt@redhat.com>	2011-12-16 16:23:44 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2011-12-21 07:18:30 -0500
commit		a79008755497daff157f5294c02e3b940641cc11 (patch)
tree		941e16ae559cbe82f371d07c09b03642bb6e83dd /kernel/trace
parent		3208230983a0ee3d95be22d463257e530c684956 (diff)
ftrace: Allocate the mcount record pages as groups

Allocate the mcount record pages in groups of pages, each group as
big as can be allocated, wasting no more than a single page.

Grouping the mcount pages as much as possible helps with cache
locality, as we do not need to follow page descriptors when crossing
from page to page. It also allows us to do more with the records
later on (sorting them brings bigger benefits).

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
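The sizing rule the patch uses (round the record count up to a power-of-two number of pages, then shrink the order while the slack would still be at least a whole page) can be modeled in user space. A minimal sketch, assuming a 4096-byte page and a 16-byte record purely for illustration; count_order() is a stand-in for the kernel's get_count_order():

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define ENTRY_SIZE       16UL	/* illustrative sizeof(struct dyn_ftrace) */
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* Smallest order with (1 << order) >= n, like the kernel's get_count_order(). */
static int count_order(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long count = 30000;	/* mcount records to place */
	int order = count_order((count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE);

	/* Shrink while the block would still leave a whole page of slack. */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

	printf("order %d holds %lu of %lu records\n",
	       order, (PAGE_SIZE << order) / ENTRY_SIZE, count);
	return 0;
}

Whatever does not fit in this first block is handled by allocating further groups for the remainder, so only the final group can end up partly empty.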
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	179
1 file changed, 128 insertions(+), 51 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index be6888f40d2b..2e7218869fe9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -983,12 +983,13 @@ static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
 	struct ftrace_page	*next;
+	struct dyn_ftrace	*records;
 	int			index;
-	struct dyn_ftrace	records[];
+	int			size;
 };
 
-#define ENTRIES_PER_PAGE \
-	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
+#define ENTRY_SIZE sizeof(struct dyn_ftrace)
+#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
 /* estimate from running different kernels */
 #define NR_TO_INIT		10000
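With the records moved out of the page that holds the ftrace_page header, a page now divides evenly into records, which is why ENTRIES_PER_PAGE loses the sizeof(struct ftrace_page) correction. A worked comparison in user space, again assuming a 4096-byte page and a hypothetical 16-byte record:

#include <stdio.h>

struct header {		/* stand-in for the old in-page ftrace_page header */
	void *next;
	int index;
};

int main(void)
{
	unsigned long page = 4096, rec = 16;	/* illustrative sizes */

	/* Old layout: header and the records[] flexible array share a page. */
	printf("old: %lu records per page\n", (page - sizeof(struct header)) / rec);
	/* New layout: records fill whole pages; the header is kzalloc()ed. */
	printf("new: %lu records per page\n", page / rec);
	return 0;
}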
@@ -1421,14 +1422,10 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 {
-	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-		if (!ftrace_pages->next) {
-			/* allocate another page */
-			ftrace_pages->next =
-				(void *)get_zeroed_page(GFP_KERNEL);
-			if (!ftrace_pages->next)
-				return NULL;
-		}
+	if (ftrace_pages->index == ftrace_pages->size) {
+		/* We should have allocated enough */
+		if (WARN_ON(!ftrace_pages->next))
+			return NULL;
 		ftrace_pages = ftrace_pages->next;
 	}
 
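Since every group is now sized up front, claiming the next free record reduces to a cursor walk, and running out means something went wrong earlier. A user-space model of the new logic (all types and names here are illustrative, not the kernel API):

#include <stdio.h>

struct rec { unsigned long ip; };

struct group {
	struct group *next;
	struct rec *records;
	int index, size;
};

static struct group *cur;	/* stands in for the ftrace_pages cursor */

static struct rec *take_record(unsigned long ip)
{
	if (cur->index == cur->size) {
		if (!cur->next)		/* we should have allocated enough */
			return NULL;
		cur = cur->next;
	}
	cur->records[cur->index].ip = ip;
	return &cur->records[cur->index++];
}

int main(void)
{
	struct rec slab[2];
	struct group g2 = { .records = &slab[1], .size = 1 };
	struct group g1 = { .next = &g2, .records = &slab[0], .size = 1 };
	struct rec *a, *b, *c;

	cur = &g1;
	a = take_record(0x1000);
	b = take_record(0x2000);
	c = take_record(0x3000);	/* exhausted: returns NULL */
	printf("%p %p %p\n", (void *)a, (void *)b, (void *)c);
	return 0;
}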
@@ -2005,47 +2002,106 @@ static int ftrace_update_code(struct module *mod)
 	return 0;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
+static int ftrace_allocate_records(struct ftrace_page *pg, int count)
 {
-	struct ftrace_page *pg;
+	int order;
 	int cnt;
-	int i;
 
-	/* allocate a few pages */
-	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
-	if (!ftrace_pages_start)
-		return -1;
+	if (WARN_ON(!count))
+		return -EINVAL;
+
+	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
 
 	/*
-	 * Allocate a few more pages.
-	 *
-	 * TODO: have some parser search vmlinux before
-	 *   final linking to find all calls to ftrace.
-	 *   Then we can:
-	 *    a) know how many pages to allocate.
-	 *     and/or
-	 *    b) set up the table then.
-	 *
-	 *  The dynamic code is still necessary for
-	 *  modules.
+	 * We want to fill as much as possible. No more than a page
+	 * may be empty.
 	 */
+	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
+		order--;
 
-	pg = ftrace_pages = ftrace_pages_start;
+ again:
+	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 
-	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt + 1);
+	if (!pg->records) {
+		/* if we can't allocate this size, try something smaller */
+		if (!order)
+			return -ENOMEM;
+		order >>= 1;
+		goto again;
+	}
 
-	for (i = 0; i < cnt; i++) {
-		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
+	pg->size = cnt;
 
-		/* If we fail, we'll try later anyway */
-		if (!pg->next)
+	if (cnt > count)
+		cnt = count;
+
+	return cnt;
+}
+
+static struct ftrace_page *
+ftrace_allocate_pages(unsigned long num_to_init)
+{
+	struct ftrace_page *start_pg;
+	struct ftrace_page *pg;
+	int order;
+	int cnt;
+
+	if (!num_to_init)
+		return 0;
+
+	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+	if (!pg)
+		return NULL;
+
+	/*
+	 * Try to allocate as much as possible in one continues
+	 * location that fills in all of the space. We want to
+	 * waste as little space as possible.
+	 */
+	for (;;) {
+		cnt = ftrace_allocate_records(pg, num_to_init);
+		if (cnt < 0)
+			goto free_pages;
+
+		num_to_init -= cnt;
+		if (!num_to_init)
 			break;
 
+		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
+		if (!pg->next)
+			goto free_pages;
+
 		pg = pg->next;
 	}
 
+	return start_pg;
+
+ free_pages:
+	while (start_pg) {
+		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+		free_pages((unsigned long)pg->records, order);
+		start_pg = pg->next;
+		kfree(pg);
+		pg = start_pg;
+	}
+	pr_info("ftrace: FAILED to allocate memory for functions\n");
+	return NULL;
+}
+
+static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
+{
+	int cnt;
+
+	if (!num_to_init) {
+		pr_info("ftrace: No functions to be traced?\n");
+		return -1;
+	}
+
+	cnt = num_to_init / ENTRIES_PER_PAGE;
+	pr_info("ftrace: allocating %ld entries in %d pages\n",
+		num_to_init, cnt + 1);
+
 	return 0;
 }
 
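Taken together, ftrace_allocate_pages() greedily sizes each group for all remaining records, retrying with a smaller order when memory is tight, and chains the groups together. A user-space model of that loop (calloc stands in for __get_free_pages and kzalloc; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE        4096UL
#define ENTRY_SIZE       16UL	/* illustrative sizeof(struct dyn_ftrace) */
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

struct group {
	struct group *next;
	void *records;
	int index, size;
};

/* Smallest order with (1 << order) >= n. */
static int count_order(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

/* Size one group for up to `count` records; returns how many it can take. */
static int allocate_records(struct group *g, unsigned long count)
{
	int order = count_order((count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE);

	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

	/* The kernel halves the order and retries on failure; the sketch
	 * simply gives up, since calloc() rarely fails here. */
	g->records = calloc(1, PAGE_SIZE << order);
	if (!g->records)
		return -1;
	g->size = (int)((PAGE_SIZE << order) / ENTRY_SIZE);
	return (unsigned long)g->size < count ? g->size : (int)count;
}

int main(void)
{
	unsigned long want = 30000;	/* records to place */
	struct group *head = calloc(1, sizeof(*head)), *g = head;
	int groups = 1;

	if (!head)
		return 1;
	for (;;) {
		int got = allocate_records(g, want);

		if (got < 0)
			return 1;
		want -= got;
		if (!want)
			break;
		g->next = calloc(1, sizeof(*g));
		if (!g->next)
			return 1;
		g = g->next;
		groups++;
	}
	printf("all records placed across %d group(s)\n", groups);
	return 0;
}

With the illustrative sizes above this places 30000 records in five groups (16384 + 8192 + 4096 + 1024 + 512 slots), with only the last group partly used.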
@@ -3520,30 +3576,45 @@ static int ftrace_process_locs(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
 {
+	struct ftrace_page *pg;
+	unsigned long count;
 	unsigned long *p;
 	unsigned long addr;
 	unsigned long flags = 0; /* Shut up gcc */
+	int ret = -ENOMEM;
+
+	count = end - start;
+
+	if (!count)
+		return 0;
+
+	pg = ftrace_allocate_pages(count);
+	if (!pg)
+		return -ENOMEM;
 
 	mutex_lock(&ftrace_lock);
+
 	/*
 	 * Core and each module needs their own pages, as
 	 * modules will free them when they are removed.
 	 * Force a new page to be allocated for modules.
 	 */
-	if (mod) {
+	if (!mod) {
+		WARN_ON(ftrace_pages || ftrace_pages_start);
+		/* First initialization */
+		ftrace_pages = ftrace_pages_start = pg;
+	} else {
 		if (!ftrace_pages)
-			return -ENOMEM;
+			goto out;
 
-		/*
-		 * If the last page was full, it will be
-		 * allocated anyway.
-		 */
-		if (ftrace_pages->index != ENTRIES_PER_PAGE) {
-			ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-			if (!ftrace_pages->next)
-				return -ENOMEM;
-			ftrace_pages = ftrace_pages->next;
+		if (WARN_ON(ftrace_pages->next)) {
+			/* Hmm, we have free pages? */
+			while (ftrace_pages->next)
+				ftrace_pages = ftrace_pages->next;
 		}
+
+		ftrace_pages->next = pg;
+		ftrace_pages = pg;
 	}
 
 	p = start;
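For the module case the pre-built chain is spliced onto the tail of the global list, while the first (core kernel) call installs it as the head. A minimal splice sketch in user space (names are illustrative):

#include <stdio.h>

struct group { struct group *next; int id; };

static struct group *head;	/* stands in for ftrace_pages_start */
static struct group *tail;	/* stands in for ftrace_pages */

static void install(struct group *chain)
{
	if (!head) {
		head = chain;	/* first initialization: the core kernel */
	} else {
		while (tail->next)	/* defensive walk, like the WARN_ON path */
			tail = tail->next;
		tail->next = chain;
	}
	tail = chain;
}

int main(void)
{
	struct group core = { .id = 1 }, mod = { .id = 2 };

	install(&core);	/* the !mod case */
	install(&mod);	/* the mod case */
	for (struct group *g = head; g; g = g->next)
		printf("group %d\n", g->id);
	return 0;
}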
@@ -3557,7 +3628,8 @@ static int ftrace_process_locs(struct module *mod,
 		 */
 		if (!addr)
 			continue;
-		ftrace_record_ip(addr);
+		if (!ftrace_record_ip(addr))
+			break;
 	}
 
 	/*
@@ -3573,9 +3645,11 @@ static int ftrace_process_locs(struct module *mod,
 	ftrace_update_code(mod);
 	if (!mod)
 		local_irq_restore(flags);
+	ret = 0;
+ out:
 	mutex_unlock(&ftrace_lock);
 
-	return 0;
+	return ret;
 }
 
 #ifdef CONFIG_MODULES
@@ -3587,6 +3661,7 @@ void ftrace_release_mod(struct module *mod)
 	struct dyn_ftrace *rec;
 	struct ftrace_page **last_pg;
 	struct ftrace_page *pg;
+	int order;
 
 	mutex_lock(&ftrace_lock);
 
@@ -3613,7 +3688,9 @@ void ftrace_release_mod(struct module *mod)
 		ftrace_pages = next_to_ftrace_page(last_pg);
 
 			*last_pg = pg->next;
-			free_page((unsigned long)pg);
+			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+			free_pages((unsigned long)pg->records, order);
+			kfree(pg);
 		} else
 			last_pg = &pg->next;
 	}
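Both this free path and the free_pages unwind in ftrace_allocate_pages() recover the allocation order from pg->size. The round-trip is exact because size was set to (PAGE_SIZE << order) / ENTRY_SIZE, so size / ENTRIES_PER_PAGE is again a power of two; a quick user-space check with the same illustrative sizes:

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define ENTRY_SIZE       16UL	/* illustrative sizeof(struct dyn_ftrace) */
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* Smallest order with (1 << order) >= n, like get_count_order(). */
static int count_order(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	int order;

	/* Every order round-trips through pg->size back to itself. */
	for (order = 0; order <= 10; order++) {
		unsigned long size = (PAGE_SIZE << order) / ENTRY_SIZE;

		printf("order %2d -> size %6lu -> order %2d\n",
		       order, size, count_order(size / ENTRIES_PER_PAGE));
	}
	return 0;
}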