aboutsummaryrefslogtreecommitdiffstats
path: root/fs/buffer.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2008-02-05 01:28:36 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:14 -0500
commitb98938c373117043598002f197200d7ed08acd49 (patch)
treeda9356af43085db78cafec82505c3341d6291201 /fs/buffer.c
parentaec2c3ed01ed54d0cdf7f6b7c4be217c045ac5ea (diff)
bufferhead: revert constructor removal
The constructor for buffer_head slabs was removed recently. We need the constructor back in slab defrag in order to ensure that slab objects always have a definite state even before we allocated them. I think we mistakenly merged the removal of the constructor into a cleanup patch. You (ie: akpm) had a test that showed that the removal of the constructor led to a small regression. The prior state makes things easier for slab defrag. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Nick Piggin <nickpiggin@yahoo.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--fs/buffer.c18
1 file changed, 15 insertions, 3 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 1de921484eac..826baf4f04bc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3153,7 +3153,7 @@ static void recalc_bh_state(void)
3153 3153
3154struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3154struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3155{ 3155{
3156 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, 3156 struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3157 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE)); 3157 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
3158 if (ret) { 3158 if (ret) {
3159 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3159 INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -3241,12 +3241,24 @@ int bh_submit_read(struct buffer_head *bh)
3241} 3241}
3242EXPORT_SYMBOL(bh_submit_read); 3242EXPORT_SYMBOL(bh_submit_read);
3243 3243
3244static void
3245init_buffer_head(struct kmem_cache *cachep, void *data)
3246{
3247 struct buffer_head *bh = data;
3248
3249 memset(bh, 0, sizeof(*bh));
3250 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3251}
3252
3244void __init buffer_init(void) 3253void __init buffer_init(void)
3245{ 3254{
3246 int nrpages; 3255 int nrpages;
3247 3256
3248 bh_cachep = KMEM_CACHE(buffer_head, 3257 bh_cachep = kmem_cache_create("buffer_head",
3249 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3258 sizeof(struct buffer_head), 0,
3259 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3260 SLAB_MEM_SPREAD),
3261 init_buffer_head);
3250 3262
3251 /* 3263 /*
3252 * Limit the bh occupancy to 10% of ZONE_NORMAL 3264 * Limit the bh occupancy to 10% of ZONE_NORMAL