path: root/mm
author	Christoph Lameter <clameter@sgi.com>	2007-10-16 04:25:41 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:59 -0400
commit	6cb062296f73e74768cca2f3eaf90deac54de02d (patch)
tree	1572139653a6fc97cdffd06f2c1bfa650da2ce03 /mm
parent	58c0a4a7864b2dad6da4090813322fcd29a11c92 (diff)
Categorize GFP flags
The function of GFP_LEVEL_MASK seems to be unclear.  In order to clear up
the mystery we get rid of it and replace GFP_LEVEL_MASK with 3 sets of GFP
flags:

GFP_RECLAIM_MASK	Flags used to control page allocator reclaim behavior.

GFP_CONSTRAINT_MASK	Flags used to limit where allocations can occur.

GFP_SLAB_BUG_MASK	Flags that the slab allocator BUG()s on.

These replace the uses of GFP_LEVEL mask in the slab allocators and in
vmalloc.c.

The use of the flags not included in these sets may occur as a result of a
slab allocation standing in for a page allocation when constructing scatter
gather lists.  Extraneous flags are cleared and not passed through to the
page allocator.  __GFP_MOVABLE/RECLAIMABLE, __GFP_COLD and __GFP_COMP will
now be ignored if passed to a slab allocator.

Change the allocation of allocator meta data in SLAB and vmalloc to not
pass through flags listed in GFP_CONSTRAINT_MASK.  SLAB already removes the
__GFP_THISNODE flag for such allocations.  Generalize that to also cover
vmalloc.  The use of GFP_CONSTRAINT_MASK also includes __GFP_HARDWALL.

The impact of allocator metadata placement on access latency to the
cachelines of the object itself is minimal since metadata is only
referenced on alloc and free.  The attempt is still made to place the meta
data optimally but we consistently allow fallback both in SLAB and vmalloc
(SLUB does not need to allocate metadata like that).

Allocator metadata may serve multiple in kernel users and thus should not
be subject to the limitations arising from a single allocation context.

[akpm@linux-foundation.org: fix fallback_alloc()]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
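For reference, a minimal sketch of how the three masks might be expressed in
terms of existing __GFP_* flags.  The exact flag membership below is an
assumption inferred from the commit message; the real definitions land
outside mm/ and are therefore not visible in this diff.

/*
 * Illustrative sketch only: the precise flag membership of each mask is an
 * assumption based on the commit message, not copied from the patch.
 */

/* Flags that steer page allocator reclaim behavior. */
#define GFP_RECLAIM_MASK	(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
				 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL| \
				 __GFP_NORETRY|__GFP_NOMEMALLOC)

/* Flags that constrain where an allocation may be placed. */
#define GFP_CONSTRAINT_MASK	(__GFP_HARDWALL|__GFP_THISNODE)

/* Flags the slab allocators BUG() on when passed in. */
#define GFP_SLAB_BUG_MASK	(__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)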
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	8
-rw-r--r--	mm/slub.c	5
-rw-r--r--	mm/vmalloc.c	5
3 files changed, 10 insertions, 8 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 368a47d80eaf..8fb56ae685de 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,9 +2746,9 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
-	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
 	l3 = cachep->nodelists[nodeid];
@@ -2785,7 +2785,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	/* Get slab management. */
 	slabp = alloc_slabmgmt(cachep, objp, offset,
-			local_flags & ~GFP_THISNODE, nodeid);
+			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3225,7 +3225,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
 			->node_zonelists[gfp_zone(flags)];
-	local_flags = (flags & GFP_LEVEL_MASK);
+	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
 	/*
diff --git a/mm/slub.c b/mm/slub.c
index 968ce3776e08..19d3202ca2dc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1088,12 +1088,13 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
+	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
 
-	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
+	page = allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 	if (!page)
 		goto out;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3cee76a8c9f0..2e01af365848 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -190,7 +190,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!size))
 		return NULL;
 
-	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
 	if (unlikely(!area))
 		return NULL;
 
@@ -439,7 +440,7 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;