about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  mm/slab.c  20
1 files changed, 9 insertions, 11 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f2e92dc1c9ce..6ad6bd5a0b3e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1648,6 +1648,14 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
 	left_over = remainder;
 
 	/*
+	 * A VFS-reclaimable slab tends to have most allocations
+	 * as GFP_NOFS and we really don't want to have to be allocating
+	 * higher-order pages when we are unable to shrink dcache.
+	 */
+	if (flags & SLAB_RECLAIM_ACCOUNT)
+		break;
+
+	/*
 	 * Large number of objects is good, but very large slabs are
 	 * currently bad for the gfp()s.
 	 */
@@ -1869,17 +1877,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	size = ALIGN(size, align);
 
-	if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
-		/*
-		 * A VFS-reclaimable slab tends to have most allocations
-		 * as GFP_NOFS and we really don't want to have to be allocating
-		 * higher-order pages when we are unable to shrink dcache.
-		 */
-		cachep->gfporder = 0;
-		cache_estimate(cachep->gfporder, size, align, flags,
-				&left_over, &cachep->num);
-	} else
-		left_over = calculate_slab_order(cachep, size, align, flags);
 
 	if (!cachep->num) {
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);