-rw-r--r--	include/linux/slub_def.h	19
-rw-r--r--	kernel/trace/ftrace.c	56
-rw-r--r--	mm/slub.c	16
3 files changed, 62 insertions(+), 29 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6b657f7dcb2b..9e3a575b2c30 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -122,10 +122,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
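To see the size cutoff concretely, here is a minimal userspace sketch (not kernel code; PAGE_SHIFT of 12 and an 8-byte minimum object are assumed, and kmalloc_index() below is a simplified stand-in that ignores the kernel's 96/192-byte special cases) of why the cache array needs SLUB_PAGE_SHIFT = PAGE_SHIFT + 1 slots when requests up to SLUB_MAX_SIZE = PAGE_SIZE stay inside SLUB:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(PAGE_SIZE)
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 1)

/* Smallest power-of-two index whose cache can hold @size bytes
 * (simplified stand-in for the kernel's kmalloc_index()). */
static int kmalloc_index(size_t size)
{
	int i = 3;			/* 8-byte minimum object assumed */

	while ((1UL << i) < size)
		i++;
	return i;
}

int main(void)
{
	size_t sizes[] = { 8, 256, 4096, 4097 };

	for (int i = 0; i < 4; i++) {
		size_t size = sizes[i];

		if (size > SLUB_MAX_SIZE)
			/* falls through to the page allocator */
			printf("%zu bytes -> kmalloc_large()\n", size);
		else
			/* a PAGE_SIZE request needs index PAGE_SHIFT,
			 * hence an array of SLUB_PAGE_SHIFT entries */
			printf("%zu bytes -> kmalloc_caches[%d] of %d\n",
			       size, kmalloc_index(size), SLUB_PAGE_SHIFT);
	}
	return 0;
}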
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dd5a2bef9cd..cf59f4c54745 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1895,6 +1895,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 
 	mutex_lock(&graph_lock);
 
+	/* Nothing, tell g_show to print all functions are enabled */
+	if (!ftrace_graph_count && !*pos)
+		return (void *)1;
+
 	p = g_next(m, p, pos);
 
 	return p;
@@ -1913,6 +1917,11 @@ static int g_show(struct seq_file *m, void *v)
 	if (!ptr)
 		return 0;
 
+	if (ptr == (unsigned long *)1) {
+		seq_printf(m, "#### all functions enabled ####\n");
+		return 0;
+	}
+
 	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
 
 	seq_printf(m, "%s\n", str);
@@ -1966,38 +1975,51 @@ ftrace_graph_read(struct file *file, char __user *ubuf,
 }
 
 static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
-	char str[KSYM_SYMBOL_LEN];
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
+	int search_len;
 	int found = 0;
-	int j;
+	int type, not;
+	char *search;
+	bool exists;
+	int i;
 
 	if (ftrace_disabled)
 		return -ENODEV;
 
+	/* decode regex */
+	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+	if (not)
+		return -EINVAL;
+
+	search_len = strlen(search);
+
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
+		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+			break;
+
 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
 			continue;
 
-		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-		if (strcmp(str, buffer) == 0) {
-			/* Return 1 if we add it to the array */
-			found = 1;
-			for (j = 0; j < idx; j++)
-				if (array[j] == rec->ip) {
-					found = 0;
+		if (ftrace_match_record(rec, search, search_len, type)) {
+			/* ensure it is not already in the array */
+			exists = false;
+			for (i = 0; i < *idx; i++)
+				if (array[i] == rec->ip) {
+					exists = true;
 					break;
 				}
-			if (found)
-				array[idx] = rec->ip;
-			goto out;
+			if (!exists) {
+				array[(*idx)++] = rec->ip;
+				found = 1;
+			}
 		}
 	} while_for_each_ftrace_rec();
- out:
+
 	mutex_unlock(&ftrace_lock);
 
 	return found ? 0 : -EINVAL;
@@ -2066,13 +2088,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 	}
 	buffer[index] = 0;
 
-	/* we allow only one at a time */
-	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+	/* we allow only one expression at a time */
+	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
 	if (ret)
 		goto out;
 
-	ftrace_graph_count++;
-
 	file->f_pos += read;
 
 	ret = read;
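To make the behavioural change concrete, here is a minimal userspace sketch (assumptions: POSIX fnmatch() stands in for the kernel's ftrace_setup_glob()/ftrace_match_record() pair, and the symbol table is a mock array) of the new ftrace_set_func() flow, where one write can now match several functions, each recorded at most once and bounded by the array size:

#include <fnmatch.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define FTRACE_GRAPH_MAX_FUNCS 32

/* mock symbol table standing in for the ftrace records */
static const char *symbols[] = {
	"schedule", "schedule_timeout", "sys_read", "sys_write",
};

static int set_func(const char *array[], int *idx, const char *pattern)
{
	int found = 0;

	for (size_t r = 0; r < sizeof(symbols) / sizeof(symbols[0]); r++) {
		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;
		if (fnmatch(pattern, symbols[r], 0) != 0)
			continue;

		/* ensure it is not already in the array */
		bool exists = false;
		for (int i = 0; i < *idx; i++)
			if (strcmp(array[i], symbols[r]) == 0) {
				exists = true;
				break;
			}
		if (!exists) {
			array[(*idx)++] = symbols[r];
			found = 1;
		}
	}
	return found ? 0 : -1;		/* -EINVAL in the kernel */
}

int main(void)
{
	const char *graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
	int count = 0;

	set_func(graph_funcs, &count, "schedule*");	/* adds two entries */
	for (int i = 0; i < count; i++)
		printf("%s\n", graph_funcs[i]);
	return 0;
}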
diff --git a/mm/slub.c b/mm/slub.c
index 3525e7b21d19..6de5e07c8850 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE)) {
+	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
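One detail worth checking in the kmem_cache_init() hunks: the renamed bound "i < SLUB_PAGE_SHIFT" walks exactly the same cache indices as the old "i <= PAGE_SHIFT", so the set of general caches created is unchanged. A tiny userspace sketch (PAGE_SHIFT of 12 and KMALLOC_SHIFT_LOW of 3 assumed) prints the names produced by the kasprintf() loop:

#include <stdio.h>

#define PAGE_SHIFT		12
#define SLUB_PAGE_SHIFT		(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_LOW	3

int main(void)
{
	/* same range as the old "i <= PAGE_SHIFT": indices 3 .. 12 */
	for (int i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
		printf("kmalloc-%d\n", 1 << i);
	return 0;
}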