author     Christoph Lameter <clameter@sgi.com>  2007-05-06 17:50:17 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:12:57 -0400
commit     cfce66047f1893cb7d3abb0d53e65cbbd8d605f0 (patch)
tree       b6e533a6b3deee686c42abf6c9117154548c0aaf
parent     4f104934591ed98534b3a4c3d17d972b790e9c42 (diff)
Slab allocators: remove useless __GFP_NO_GROW flag
There is no user remaining and I have never seen any use of that flag.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/gfp.h  3
-rw-r--r--  mm/slab.c            6
-rw-r--r--  mm/slub.c            3
3 files changed, 3 insertions, 9 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2a7d15bcde46..97a36c3d96e2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,7 +40,6 @@ struct vm_area_struct;
 #define __GFP_REPEAT	((__force gfp_t)0x400u)	/* Retry the allocation.  Might fail */
 #define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
 #define __GFP_NORETRY	((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
-#define __GFP_NO_GROW	((__force gfp_t)0x2000u)/* Slab internal usage */
 #define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
@@ -53,7 +52,7 @@ struct vm_area_struct;
 /* if you forget to add the bitmask here kernel will crash, period */
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
+			__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
 			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
 
 /* This equals 0, but use constants in case they ever change */
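For readers unfamiliar with the pattern, here is a minimal user-space sketch of why a flag the slab allocators accept must also appear in GFP_LEVEL_MASK: the allocators reject any bit outside GFP_DMA | GFP_LEVEL_MASK with a BUG_ON(), so once __GFP_NO_GROW is deleted it has to leave the mask as well. The SKETCH_* defines, check_slab_flags(), and the assert() below are illustrative stand-ins, not kernel code.

#include <assert.h>
#include <stdio.h>

/*
 * Minimal user-space sketch (not kernel source): simplified stand-ins for
 * the bits touched above. check_slab_flags() plays the role of the
 * BUG_ON() in cache_grow()/new_slab(); all names here are hypothetical.
 */
#define SKETCH_GFP_NORETRY   0x1000u
#define SKETCH_GFP_NO_GROW   0x2000u   /* the flag this patch deletes */
#define SKETCH_GFP_COMP      0x4000u

#define SKETCH_GFP_DMA        0x01u
#define SKETCH_GFP_LEVEL_MASK (SKETCH_GFP_NORETRY | SKETCH_GFP_COMP)

static void check_slab_flags(unsigned int flags)
{
	/* Any bit outside the accepted mask is treated as a fatal caller error. */
	assert((flags & ~(SKETCH_GFP_DMA | SKETCH_GFP_LEVEL_MASK)) == 0);
}

int main(void)
{
	check_slab_flags(SKETCH_GFP_NORETRY | SKETCH_GFP_DMA); /* accepted */
	printf("ordinary flags pass the check\n");

	/*
	 * A leftover __GFP_NO_GROW user would now trip the assertion,
	 * because the bit is no longer part of the level mask.
	 */
	check_slab_flags(SKETCH_GFP_NO_GROW);
	printf("never reached\n");
	return 0;
}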
diff --git a/mm/slab.c b/mm/slab.c
index 52ecf7599a7b..5920a412b377 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,9 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
-	if (flags & __GFP_NO_GROW)
-		return 0;
+	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
@@ -3252,7 +3250,7 @@ retry:
 					flags | GFP_THISNODE, nid);
 	}
 
-	if (!obj && !(flags & __GFP_NO_GROW)) {
+	if (!obj) {
 		/*
 		 * This allocation will be performed within the constraints
 		 * of the current cpuset / memory policy requirements.
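As a companion illustration, a self-contained sketch of what the second mm/slab.c hunk changes: with __GFP_NO_GROW gone, a failed per-node lookup in the retry path always falls through to growing the cache. find_object() and grow_cache() are hypothetical stand-ins for the kernel's lookup and cache-growth paths, not real APIs.

#include <stdio.h>

/*
 * Illustrative user-space sketch, not kernel source: the shape of the
 * fallback path after this patch. find_object() and grow_cache() are
 * hypothetical placeholders.
 */
static void *find_object(void)
{
	return NULL;            /* pretend every node's lists are empty */
}

static void *grow_cache(void)
{
	static int slab_object; /* placeholder "freshly grown" object */
	return &slab_object;
}

static void *alloc_fallback(void)
{
	void *obj = find_object();

	/*
	 * Before the patch: if (!obj && !(flags & __GFP_NO_GROW)) { ... }
	 * After the patch the flag test is gone, so growth is attempted
	 * whenever the lookup fails.
	 */
	if (!obj)
		obj = grow_cache();
	return obj;
}

int main(void)
{
	printf("allocated object at %p\n", alloc_fallback());
	return 0;
}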
diff --git a/mm/slub.c b/mm/slub.c
index 347e44821bcb..a6323484dd3e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -815,9 +815,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	if (flags & __GFP_NO_GROW)
-		return NULL;
-
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
 	if (flags & __GFP_WAIT)