-rw-r--r--  fs/buffer.c                        3
-rw-r--r--  fs/dcache.c                        2
-rw-r--r--  fs/jbd/journal.c                   4
-rw-r--r--  fs/jbd/revoke.c                    6
-rw-r--r--  fs/proc/base.c                    13
-rw-r--r--  fs/proc/generic.c                  2
-rw-r--r--  include/linux/gfp.h               15
-rw-r--r--  include/linux/mmzone.h             6
-rw-r--r--  include/linux/pageblock-flags.h    2
-rw-r--r--  include/linux/slab.h               4
-rw-r--r--  kernel/cpuset.c                    2
-rw-r--r--  lib/radix-tree.c                   6
-rw-r--r--  mm/page_alloc.c                   10
-rw-r--r--  mm/shmem.c                         4
-rw-r--r--  mm/slab.c                          2
-rw-r--r--  mm/slub.c                          3
16 files changed, 56 insertions, 28 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a406cfd89e3b..faceb5eecca9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3169,7 +3169,8 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+			set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
diff --git a/fs/dcache.c b/fs/dcache.c
index 678d39deb607..7da0cf50873e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -903,7 +903,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	struct dentry *dentry;
 	char *dname;
 
-	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
 	if (!dentry)
 		return NULL;
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 06ab3c10b1b8..a6be78c05dce 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1710,7 +1710,7 @@ static int journal_init_journal_head_cache(void)
 	journal_head_cache = kmem_cache_create("journal_head",
 				sizeof(struct journal_head),
 				0,		/* offset */
-				0,		/* flags */
+				SLAB_TEMPORARY,	/* flags */
 				NULL);		/* ctor */
 	retval = 0;
 	if (journal_head_cache == 0) {
@@ -2006,7 +2006,7 @@ static int __init journal_init_handle_cache(void)
 	jbd_handle_cache = kmem_cache_create("journal_handle",
 				sizeof(handle_t),
 				0,		/* offset */
-				0,		/* flags */
+				SLAB_TEMPORARY,	/* flags */
 				NULL);		/* ctor */
 	if (jbd_handle_cache == NULL) {
 		printk(KERN_EMERG "JBD: failed to create handle cache\n");
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 62e13c8db132..ad2eacf570c6 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -170,13 +170,15 @@ int __init journal_init_revoke_caches(void)
 {
 	revoke_record_cache = kmem_cache_create("revoke_record",
 					   sizeof(struct jbd_revoke_record_s),
-					   0, SLAB_HWCACHE_ALIGN, NULL);
+					   0,
+					   SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
+					   NULL);
 	if (revoke_record_cache == 0)
 		return -ENOMEM;
 
 	revoke_table_cache = kmem_cache_create("revoke_table",
 					   sizeof(struct jbd_revoke_table_s),
-					   0, 0, NULL);
+					   0, SLAB_TEMPORARY, NULL);
 	if (revoke_table_cache == 0) {
 		kmem_cache_destroy(revoke_record_cache);
 		revoke_record_cache = NULL;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e5d0953d4db1..78fdfea1a7f8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -492,7 +492,7 @@ static ssize_t proc_info_read(struct file * file, char __user * buf,
 		count = PROC_BLOCK_SIZE;
 
 	length = -ENOMEM;
-	if (!(page = __get_free_page(GFP_KERNEL)))
+	if (!(page = __get_free_page(GFP_TEMPORARY)))
 		goto out;
 
 	length = PROC_I(inode)->op.proc_read(task, (char*)page);
@@ -532,7 +532,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
 		goto out;
 
 	ret = -ENOMEM;
-	page = (char *)__get_free_page(GFP_USER);
+	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		goto out;
 
@@ -602,7 +602,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
 		goto out;
 
 	copied = -ENOMEM;
-	page = (char *)__get_free_page(GFP_USER);
+	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		goto out;
 
@@ -788,7 +788,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
 		/* No partial writes. */
 		return -EINVAL;
 	}
-	page = (char*)__get_free_page(GFP_USER);
+	page = (char*)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		return -ENOMEM;
 	length = -EFAULT;
@@ -954,7 +954,8 @@ static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
 			  char __user *buffer, int buflen)
 {
 	struct inode * inode;
-	char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
+	char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
+	char *path;
 	int len;
 
 	if (!tmp)
@@ -1726,7 +1727,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
 		goto out;
 
 	length = -ENOMEM;
-	page = (char*)__get_free_page(GFP_USER);
+	page = (char*)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		goto out;
 
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index b5e7155d30d8..1bdb62435758 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -74,7 +74,7 @@ proc_file_read(struct file *file, char __user *buf, size_t nbytes,
 		nbytes = MAX_NON_LFS - pos;
 
 	dp = PDE(inode);
-	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
+	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
 		return -ENOMEM;
 
 	while ((nbytes > 0) && !eof) {
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index da8aa872eb6e..f8ffcd401c5f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -48,9 +48,10 @@ struct vm_area_struct;
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_MOVABLE	((__force gfp_t)0x80000u) /* Page is movable */
+#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+#define __GFP_MOVABLE	((__force gfp_t)0x100000u) /* Page is movable */
 
-#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -60,6 +61,8 @@ struct vm_area_struct;
 #define GFP_NOIO	(__GFP_WAIT)
 #define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
 #define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+			 __GFP_RECLAIMABLE)
 #define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
 			 __GFP_HIGHMEM)
@@ -80,7 +83,7 @@ struct vm_area_struct;
 #endif
 
 /* This mask makes up all the page movable related flags */
-#define GFP_MOVABLE_MASK (__GFP_MOVABLE)
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
 
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
@@ -129,6 +132,12 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 		return base + ZONE_NORMAL;
 }
 
+static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
+{
+	BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+	return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+}
+
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it. The alloc_page*() variants return 'struct page *' and as such
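
Taken together, the gfp.h changes give __GFP_RECLAIMABLE the old __GFP_MOVABLE bit (0x80000), move __GFP_MOVABLE up to 0x100000, and add GFP_TEMPORARY as GFP_KERNEL plus the reclaimable hint; set_migrateflags() swaps out whichever mobility hint a mask already carries for the caller's. A minimal userspace sketch of the flag arithmetic (the __GFP_WAIT/__GFP_IO/__GFP_FS values are assumed from the 2.6-era layout and do not appear in this hunk):

	#include <assert.h>
	#include <stdio.h>

	typedef unsigned int gfp_t;

	/* Bits taken from the hunks above */
	#define __GFP_RECLAIMABLE 0x80000u
	#define __GFP_MOVABLE     0x100000u
	#define GFP_MOVABLE_MASK  (__GFP_RECLAIMABLE | __GFP_MOVABLE)

	/* __GFP_WAIT/__GFP_IO/__GFP_FS values assumed from the 2.6-era layout */
	#define __GFP_WAIT 0x10u
	#define __GFP_IO   0x40u
	#define __GFP_FS   0x80u

	#define GFP_KERNEL    (__GFP_WAIT | __GFP_IO | __GFP_FS)
	#define GFP_TEMPORARY (GFP_KERNEL | __GFP_RECLAIMABLE)

	/* Mirrors set_migrateflags(): replace any mobility hint already set */
	static gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
	{
		/* BUG_ON in the kernel: carrying both hints is a caller error */
		assert((gfp & GFP_MOVABLE_MASK) != GFP_MOVABLE_MASK);
		return (gfp & ~GFP_MOVABLE_MASK) | migrate_flags;
	}

	int main(void)
	{
		gfp_t g = set_migrateflags(GFP_KERNEL, __GFP_RECLAIMABLE);

		assert(g == GFP_TEMPORARY);	/* same mask either way */
		printf("GFP_TEMPORARY = %#x\n", g);
		return 0;
	}

Callers that already hold a gfp mask (buffer heads, radix-tree nodes) go through set_migrateflags(), while callers building a mask from scratch simply say GFP_TEMPORARY.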
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7d7e4fe0fda8..4721e9aa3ced 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,10 +35,12 @@
 
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE     0
-#define MIGRATE_MOVABLE       1
-#define MIGRATE_TYPES         2
+#define MIGRATE_RECLAIMABLE   1
+#define MIGRATE_MOVABLE       2
+#define MIGRATE_TYPES         3
 #else
 #define MIGRATE_UNMOVABLE     0
+#define MIGRATE_UNRECLAIMABLE 0
 #define MIGRATE_MOVABLE       0
 #define MIGRATE_TYPES         1
 #endif
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 3619d52a425c..5456da6b4ade 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -31,7 +31,7 @@
 
 /* Bit indices that affect a whole block of pages */
 enum pageblock_bits {
-	PB_range(PB_migrate, 1), /* 1 bit required for migrate types */
+	PB_range(PB_migrate, 2), /* 2 bits required for migrate types */
 	NR_PAGEBLOCK_BITS
 };
 
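The jump from one to two pageblock bits follows directly from the new MIGRATE_TYPES count in the mmzone.h hunk: two bits store values 0..3, enough for three migrate types. A one-assert sketch of that check:

	#include <assert.h>

	#define MIGRATE_TYPES   3	/* from the mmzone.h hunk above */
	#define PB_MIGRATE_BITS 2	/* from the pageblock-flags.h hunk above */

	int main(void)
	{
		/* 2 bits hold values 0..3, enough for 3 migrate types */
		assert((1 << PB_MIGRATE_BITS) >= MIGRATE_TYPES);
		return 0;
	}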
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d859354b9e51..3a5bad3ad126 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,12 +24,14 @@
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
+/* The following flags affect the page allocator grouping pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
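
SLAB_TEMPORARY is deliberately an alias for SLAB_RECLAIM_ACCOUNT, so cache owners opt in with a single flag at creation time, as the jbd hunks above do. A hedged sketch of that pattern, assuming the five-argument kmem_cache_create() used elsewhere in this patch (the cache name and struct are hypothetical stand-ins):

	struct example { int payload; };	/* hypothetical object type */

	static struct kmem_cache *example_cache;

	static int __init example_cache_init(void)
	{
		example_cache = kmem_cache_create("example_cache",
						  sizeof(struct example),
						  0,		  /* offset */
						  SLAB_TEMPORARY, /* short-lived objects */
						  NULL);	  /* ctor */
		return example_cache ? 0 : -ENOMEM;
	}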
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8b2daac4de83..e196510aa40f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1463,7 +1463,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
 	ssize_t retval = 0;
 	char *s;
 
-	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
 		return -ENOMEM;
 
 	s = page;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 519d3f00ef9e..6b26f9d39800 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -98,7 +98,8 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	struct radix_tree_node *ret;
 	gfp_t gfp_mask = root_gfp_mask(root);
 
-	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+	ret = kmem_cache_alloc(radix_tree_node_cachep,
+			set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
 	if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
 		struct radix_tree_preload *rtp;
 
@@ -142,7 +143,8 @@ int radix_tree_preload(gfp_t gfp_mask)
 	rtp = &__get_cpu_var(radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+		node = kmem_cache_alloc(radix_tree_node_cachep,
+				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
 		if (node == NULL)
 			goto out;
 		preempt_disable();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d575a3ee8dd8..29f4de1423c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 
 static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 {
-	return ((gfp_flags & __GFP_MOVABLE) != 0);
+	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
 #else
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_MOVABLE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_UNMOVABLE },
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
 };
 
 /*
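
The two-bit encoding maps each mobility hint straight onto a MIGRATE_* index from the mmzone.h hunk: no hint gives MIGRATE_UNMOVABLE (0), __GFP_RECLAIMABLE gives MIGRATE_RECLAIMABLE (1), and __GFP_MOVABLE gives MIGRATE_MOVABLE (2); the WARN_ON catches masks that carry both hints at once. A small userspace check of the arithmetic, with constants copied from the hunks above:

	#include <assert.h>

	typedef unsigned int gfp_t;

	#define __GFP_RECLAIMABLE 0x80000u	/* from the gfp.h hunk */
	#define __GFP_MOVABLE     0x100000u

	enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

	/* Same expression as the hunk above, minus the WARN_ON */
	static int gfpflags_to_migratetype(gfp_t gfp_flags)
	{
		return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
			((gfp_flags & __GFP_RECLAIMABLE) != 0);
	}

	int main(void)
	{
		assert(gfpflags_to_migratetype(0) == MIGRATE_UNMOVABLE);
		assert(gfpflags_to_migratetype(__GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE);
		assert(gfpflags_to_migratetype(__GFP_MOVABLE) == MIGRATE_MOVABLE);
		return 0;
	}

The expanded fallbacks table keeps the same idea as before (steal from another type when a free list runs dry) but prefers reclaimable blocks over movable ones for unmovable allocations, limiting how far unmovable pages spread.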
diff --git a/mm/shmem.c b/mm/shmem.c
index 855b93b3637c..76ecbac0d55b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 *
-	 * __GFP_MOVABLE is masked out as swap vectors cannot move
+	 * Mobility flags are masked out as swap vectors cannot move
 	 */
-	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
 			PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index 8fb56ae685de..e34bcb87a6ee 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
 	flags |= cachep->gfpflags;
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
diff --git a/mm/slub.c b/mm/slub.c
index 19d3202ca2dc..a90c4ffc9576 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (s->flags & SLAB_CACHE_DMA)
 		flags |= SLUB_DMA;
 
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
+
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
 	else
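
The slab and slub hooks close the loop: any cache created with SLAB_RECLAIM_ACCOUNT (and hence SLAB_TEMPORARY) gets __GFP_RECLAIMABLE OR'ed into its page allocations, so its slabs land on the MIGRATE_RECLAIMABLE free lists. A minimal userspace sketch of that propagation (slab_page_flags() is a hypothetical stand-in for the hooks in kmem_getpages() and allocate_slab() above):

	#include <assert.h>

	typedef unsigned int gfp_t;

	#define SLAB_RECLAIM_ACCOUNT 0x00020000UL	/* from the slab.h hunk */
	#define SLAB_TEMPORARY       SLAB_RECLAIM_ACCOUNT
	#define __GFP_RECLAIMABLE    0x80000u		/* from the gfp.h hunk */

	/* Hypothetical stand-in for the slab/slub page-allocation hooks */
	static gfp_t slab_page_flags(unsigned long cache_flags, gfp_t flags)
	{
		if (cache_flags & SLAB_RECLAIM_ACCOUNT)
			flags |= __GFP_RECLAIMABLE;
		return flags;
	}

	int main(void)
	{
		/* A SLAB_TEMPORARY cache tags its pages reclaimable... */
		assert(slab_page_flags(SLAB_TEMPORARY, 0) & __GFP_RECLAIMABLE);
		/* ...an ordinary cache does not */
		assert(!(slab_page_flags(0, 0) & __GFP_RECLAIMABLE));
		return 0;
	}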