author	David Rientjes <rientjes@google.com>	2011-10-19 01:09:28 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-11-10 14:25:30 -0500
commit	3df1cccdfb3fab6aa9176beb655d802eb384eabc (patch)
tree	ab3e1e206a9120e936caa52ea85193aecf670e42 /mm/slab.c
parent	543585cc5b07fa99a2dc897159fbf48c1eb73058 (diff)
slab: introduce slab_max_order kernel parameter
Introduce a new slab_max_order kernel parameter, the equivalent of slub_max_order.

For immediate purposes, this allows users to override the heuristic that sets the maximum order to 1 by default on machines with more than 32MB of RAM, which may result in page allocation failures when memory is substantially fragmented. Another use case is increasing the maximum order for better performance.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
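Usage illustration (the specific value here is only an example, not taken from this patch): the __setup() handler added below accepts the parameter on the kernel command line, e.g.

	slab_max_order=2

which sets the maximum page order used for slab pages to 2 (16KB slabs with the common 4KB page size). As the handler shows, negative values are clamped to 0, values above MAX_ORDER - 1 are capped, and an explicit setting suppresses the 32MB heuristic in kmem_cache_init().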
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1a482e8402c4..b0414d12fd08 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -479,11 +479,13 @@ EXPORT_SYMBOL(slab_buffer_size);
 #endif
 
 /*
- * Do not go above this order unless 0 objects fit into the slab.
+ * Do not go above this order unless 0 objects fit into the slab or
+ * overridden on the command line.
  */
 #define SLAB_MAX_ORDER_HI	1
 #define SLAB_MAX_ORDER_LO	0
 static int slab_max_order = SLAB_MAX_ORDER_LO;
+static bool slab_max_order_set __initdata;
 
 /*
  * Functions for storing/retrieving the cachep and or slab from the page
@@ -851,6 +853,17 @@ static int __init noaliencache_setup(char *s)
 }
 __setup("noaliencache", noaliencache_setup);
 
+static int __init slab_max_order_setup(char *str)
+{
+	get_option(&str, &slab_max_order);
+	slab_max_order = slab_max_order < 0 ? 0 :
+				min(slab_max_order, MAX_ORDER - 1);
+	slab_max_order_set = true;
+
+	return 1;
+}
+__setup("slab_max_order=", slab_max_order_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
@@ -1499,9 +1512,10 @@ void __init kmem_cache_init(void)
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
-	 * page orders on machines with more than 32MB of memory.
+	 * page orders on machines with more than 32MB of memory if
+	 * not overridden on the command line.
 	 */
-	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
+	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
 		slab_max_order = SLAB_MAX_ORDER_HI;
 
 	/* Bootstrap is tricky, because several objects are allocated