author    Christoph Lameter <clameter@sgi.com>            2008-04-28 05:12:05 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:17 -0400
commit    488514d1798289f56f80ed018e246179fe500383
tree      e68d9f703dcbceed0cc08b03014d109d7ec3acd4
parent    e92adcba261fd391591bb63c1703185a04a41554

Remove set_migrateflags()
Migrate flags must be set on slab creation, as agreed upon when the antifrag logic was reviewed.  Otherwise some slabs of a slabcache will end up in the unmovable section and others in the reclaimable section, depending on which flag was active when a new slab page was allocated.  This likely slid in somehow when antifrag was merged.  Remove it.

The buffer_heads are always allocated with __GFP_RECLAIMABLE because the SLAB_RECLAIM_ACCOUNT option is set.  The set_migrateflags() never had any effect there.

Radix tree allocations are not directly reclaimable, but they are allocated with __GFP_RECLAIMABLE set on each allocation.  We now set SLAB_RECLAIM_ACCOUNT on radix tree slab creation, making sure that radix tree slabs are consistently placed in the reclaimable section.  Radix tree slabs will also be accounted as such.

There is then no user left of set_migrateflags().  So remove it.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
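For illustration, a minimal sketch of the convention the patch enforces; the names foo, foo_cachep, foo_init() and foo_alloc() are hypothetical and not part of this patch.  The reclaimable placement is requested once, at cache creation, via SLAB_RECLAIM_ACCOUNT, so every slab page of the cache ends up in the reclaimable migrate type; individual allocations then pass only their ordinary gfp mask.

	#include <linux/init.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	/* Hypothetical object type managed by its own slab cache. */
	struct foo {
		struct list_head list;
		unsigned long data;
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/*
		 * SLAB_RECLAIM_ACCOUNT marks the whole cache as reclaimable,
		 * so all of its slab pages land in the reclaimable section
		 * and are accounted as reclaimable slab memory.
		 */
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
					       0, SLAB_RECLAIM_ACCOUNT, NULL);
		if (!foo_cachep)
			return -ENOMEM;
		return 0;
	}

	static struct foo *foo_alloc(gfp_t gfp_mask)
	{
		/*
		 * No per-call __GFP_RECLAIMABLE here: mixing per-allocation
		 * migrate flags would scatter objects of the same cache
		 * across unmovable and reclaimable page blocks.
		 */
		return kmem_cache_alloc(foo_cachep, gfp_mask);
	}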
 fs/buffer.c         | 3 +--
 include/linux/gfp.h | 6 ------
 lib/radix-tree.c    | 9 ++++-----
 3 files changed, 5 insertions(+), 13 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 39ff14403d13..8b9807523efe 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3180,8 +3180,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9da3c1b..c17ba4945203 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -144,12 +144,6 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 	return base + ZONE_NORMAL;
 }
 
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
-	BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
-	return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
-}
-
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it. The alloc_page*() variants return 'struct page *' and as such
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		}
 	}
 	if (ret == NULL)
-		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
 	rtp = &__get_cpu_var(radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
-		node = kmem_cache_alloc(radix_tree_node_cachep,
-				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
 {
 	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
 			sizeof(struct radix_tree_node), 0,
-			SLAB_PANIC, radix_tree_node_ctor);
+			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+			radix_tree_node_ctor);
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }