Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  23
1 file changed, 2 insertions(+), 21 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 06533f342be0..6572cef0c43c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,25 +149,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-#if PAGE_SHIFT <= 12
-
-/*
- * Small page size. Make sure that we do not fragment memory
- */
-#define DEFAULT_MAX_ORDER 1
-#define DEFAULT_MIN_OBJECTS 4
-
-#else
-
-/*
- * Large page machines are customarily able to handle larger
- * page orders.
- */
-#define DEFAULT_MAX_ORDER 2
-#define DEFAULT_MIN_OBJECTS 8
-
-#endif
-
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -1821,8 +1802,8 @@ static struct page *get_object_page(const void *x)
  * take the list_lock.
  */
 static int slub_min_order;
-static int slub_max_order = DEFAULT_MAX_ORDER;
-static int slub_min_objects = DEFAULT_MIN_OBJECTS;
+static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static int slub_min_objects = 4;
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
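
For context, the change above drops the PAGE_SHIFT-based compile-time defaults and instead caps slub_max_order with PAGE_ALLOC_COSTLY_ORDER, the page allocator's threshold (order 3 in mainline at the time) above which allocations are considered costly to satisfy. Below is a minimal sketch of how the two tunables bound slab sizing, assuming a simplified order search; sketch_slab_order is a hypothetical helper, not the kernel's actual slab_order() in mm/slub.c, which additionally weighs wasted space per slab:

#include <linux/mm.h>	/* PAGE_SIZE */

/*
 * Illustrative only: find the lowest page order, capped at max_order,
 * at which at least min_objects objects of the given size fit into
 * one slab.
 */
static unsigned int sketch_slab_order(unsigned int size,
				      unsigned int min_objects,
				      unsigned int max_order)
{
	unsigned int order;

	for (order = 0; order < max_order; order++) {
		unsigned long slab_bytes = PAGE_SIZE << order;

		if (slab_bytes / size >= min_objects)
			break;	/* enough objects fit at this order */
	}
	return order;	/* equals max_order if nothing smaller fit */
}

Under this sketch and the new defaults, a 512-byte cache on 4 KiB pages already fits 8 objects at order 0, so the order stays at 0; only objects larger than PAGE_SIZE / slub_min_objects push the order up, and never past PAGE_ALLOC_COSTLY_ORDER.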