about | summary | refs | log | tree | commit | diff | stats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-04-13 16:28:13 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-13 16:28:13 -0400
commitbf3a340738bc78008e496257c04fb5a7fc8281e6 (patch)
tree3e84d21261ff0c437f0ea2507df8c30844150769 /mm/slub.c
parent321d03c86732e45f5f33ad0db5b68e2e1364acb9 (diff)
parent34bf6ef94a835a8f1d8abd3e7d38c6c08d205867 (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab changes from Pekka Enberg: "The biggest change is byte-sized freelist indices which reduces slab freelist memory usage: https://lkml.org/lkml/2013/12/2/64" * 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: mm: slab/slub: use page->list consistently instead of page->lru mm/slab.c: cleanup outdated comments and unify variables naming slab: fix wrongly used macro slub: fix high order page allocation problem with __GFP_NOFAIL slab: Make allocations with GFP_ZERO slightly more efficient slab: make more slab management structure off the slab slab: introduce byte sized index for the freelist of a slab slab: restrict the number of objects in a slab slab: introduce helper functions to get/set free object slab: factor out calculate nr objects in cache_estimate
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index f620bbf4054a..5e234f1f8853 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1352,11 +1352,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1352 page = alloc_slab_page(alloc_gfp, node, oo); 1352 page = alloc_slab_page(alloc_gfp, node, oo);
1353 if (unlikely(!page)) { 1353 if (unlikely(!page)) {
1354 oo = s->min; 1354 oo = s->min;
1355 alloc_gfp = flags;
1355 /* 1356 /*
1356 * Allocation may have failed due to fragmentation. 1357 * Allocation may have failed due to fragmentation.
1357 * Try a lower order alloc if possible 1358 * Try a lower order alloc if possible
1358 */ 1359 */
1359 page = alloc_slab_page(flags, node, oo); 1360 page = alloc_slab_page(alloc_gfp, node, oo);
1360 1361
1361 if (page) 1362 if (page)
1362 stat(s, ORDER_FALLBACK); 1363 stat(s, ORDER_FALLBACK);
@@ -1366,7 +1367,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1366 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { 1367 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1367 int pages = 1 << oo_order(oo); 1368 int pages = 1 << oo_order(oo);
1368 1369
1369 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); 1370 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
1370 1371
1371 /* 1372 /*
1372 * Objects from caches that have a constructor don't get 1373 * Objects from caches that have a constructor don't get