diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-11 21:52:23 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-11 21:52:23 -0500 |
commit | 6296e5d3c067df41980a5fd09ad4cc6765f79bb9 (patch) | |
tree | ac10bc5321ac1d750612c0e0ae53d6c4097c5734 /mm/slab.c | |
parent | c086ae4ed94f9a1d283318e006813268c2dbf9fc (diff) | |
parent | 5878cf431ca7233a56819ca6970153ac0b129599 (diff) |
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
slub: disallow changing cpu_partial from userspace for debug caches
slub: add missed accounting
slub: Extract get_freelist from __slab_alloc
slub: Switch per cpu partial page support off for debugging
slub: fix a possible memleak in __slab_alloc()
slub: fix slub_max_order Documentation
slub: add missed accounting
slab: add taint flag outputting to debug paths.
slub: add taint flag outputting to debug paths
slab: introduce slab_max_order kernel parameter
slab: rename slab_break_gfp_order to slab_max_order
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 39 |
1 file changed, 27 insertions(+), 12 deletions(-)
@@ -481,11 +481,13 @@ EXPORT_SYMBOL(slab_buffer_size); | |||
481 | #endif | 481 | #endif |
482 | 482 | ||
483 | /* | 483 | /* |
484 | * Do not go above this order unless 0 objects fit into the slab. | 484 | * Do not go above this order unless 0 objects fit into the slab or |
485 | * overridden on the command line. | ||
485 | */ | 486 | */ |
486 | #define BREAK_GFP_ORDER_HI 1 | 487 | #define SLAB_MAX_ORDER_HI 1 |
487 | #define BREAK_GFP_ORDER_LO 0 | 488 | #define SLAB_MAX_ORDER_LO 0 |
488 | static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; | 489 | static int slab_max_order = SLAB_MAX_ORDER_LO; |
490 | static bool slab_max_order_set __initdata; | ||
489 | 491 | ||
490 | /* | 492 | /* |
491 | * Functions for storing/retrieving the cachep and or slab from the page | 493 | * Functions for storing/retrieving the cachep and or slab from the page |
@@ -854,6 +856,17 @@ static int __init noaliencache_setup(char *s) | |||
854 | } | 856 | } |
855 | __setup("noaliencache", noaliencache_setup); | 857 | __setup("noaliencache", noaliencache_setup); |
856 | 858 | ||
859 | static int __init slab_max_order_setup(char *str) | ||
860 | { | ||
861 | get_option(&str, &slab_max_order); | ||
862 | slab_max_order = slab_max_order < 0 ? 0 : | ||
863 | min(slab_max_order, MAX_ORDER - 1); | ||
864 | slab_max_order_set = true; | ||
865 | |||
866 | return 1; | ||
867 | } | ||
868 | __setup("slab_max_order=", slab_max_order_setup); | ||
869 | |||
857 | #ifdef CONFIG_NUMA | 870 | #ifdef CONFIG_NUMA |
858 | /* | 871 | /* |
859 | * Special reaping functions for NUMA systems called from cache_reap(). | 872 | * Special reaping functions for NUMA systems called from cache_reap(). |
@@ -1502,10 +1515,11 @@ void __init kmem_cache_init(void) | |||
1502 | 1515 | ||
1503 | /* | 1516 | /* |
1504 | * Fragmentation resistance on low memory - only use bigger | 1517 | * Fragmentation resistance on low memory - only use bigger |
1505 | * page orders on machines with more than 32MB of memory. | 1518 | * page orders on machines with more than 32MB of memory if |
1519 | * not overridden on the command line. | ||
1506 | */ | 1520 | */ |
1507 | if (totalram_pages > (32 << 20) >> PAGE_SHIFT) | 1521 | if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT) |
1508 | slab_break_gfp_order = BREAK_GFP_ORDER_HI; | 1522 | slab_max_order = SLAB_MAX_ORDER_HI; |
1509 | 1523 | ||
1510 | /* Bootstrap is tricky, because several objects are allocated | 1524 | /* Bootstrap is tricky, because several objects are allocated |
1511 | * from caches that do not exist yet: | 1525 | * from caches that do not exist yet: |
@@ -1932,8 +1946,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
1932 | /* Print header */ | 1946 | /* Print header */ |
1933 | if (lines == 0) { | 1947 | if (lines == 0) { |
1934 | printk(KERN_ERR | 1948 | printk(KERN_ERR |
1935 | "Slab corruption: %s start=%p, len=%d\n", | 1949 | "Slab corruption (%s): %s start=%p, len=%d\n", |
1936 | cachep->name, realobj, size); | 1950 | print_tainted(), cachep->name, realobj, size); |
1937 | print_objinfo(cachep, objp, 0); | 1951 | print_objinfo(cachep, objp, 0); |
1938 | } | 1952 | } |
1939 | /* Hexdump the affected line */ | 1953 | /* Hexdump the affected line */ |
@@ -2117,7 +2131,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, | |||
2117 | * Large number of objects is good, but very large slabs are | 2131 | * Large number of objects is good, but very large slabs are |
2118 | * currently bad for the gfp()s. | 2132 | * currently bad for the gfp()s. |
2119 | */ | 2133 | */ |
2120 | if (gfporder >= slab_break_gfp_order) | 2134 | if (gfporder >= slab_max_order) |
2121 | break; | 2135 | break; |
2122 | 2136 | ||
2123 | /* | 2137 | /* |
@@ -3042,8 +3056,9 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) | |||
3042 | if (entries != cachep->num - slabp->inuse) { | 3056 | if (entries != cachep->num - slabp->inuse) { |
3043 | bad: | 3057 | bad: |
3044 | printk(KERN_ERR "slab: Internal list corruption detected in " | 3058 | printk(KERN_ERR "slab: Internal list corruption detected in " |
3045 | "cache '%s'(%d), slabp %p(%d). Hexdump:\n", | 3059 | "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n", |
3046 | cachep->name, cachep->num, slabp, slabp->inuse); | 3060 | cachep->name, cachep->num, slabp, slabp->inuse, |
3061 | print_tainted()); | ||
3047 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp, | 3062 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp, |
3048 | sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t), | 3063 | sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t), |
3049 | 1); | 3064 | 1); |