Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 41 ++++++++++++-----------------------------
 1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6fa530256bfd..e8aa7081d25c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -275,6 +275,7 @@ void invalidate_bdev(struct block_device *bdev)
                 return;
 
         invalidate_bh_lrus();
+        lru_add_drain_all();    /* make sure all lru add caches are flushed */
         invalidate_mapping_pages(mapping, 0, -1);
 }
 EXPORT_SYMBOL(invalidate_bdev);
@@ -560,26 +561,17 @@ repeat:
         return err;
 }
 
-static void do_thaw_all(struct work_struct *work)
+static void do_thaw_one(struct super_block *sb, void *unused)
 {
-        struct super_block *sb;
         char b[BDEVNAME_SIZE];
+        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+                printk(KERN_WARNING "Emergency Thaw on %s\n",
+                       bdevname(sb->s_bdev, b));
+}
 
-        spin_lock(&sb_lock);
-restart:
-        list_for_each_entry(sb, &super_blocks, s_list) {
-                sb->s_count++;
-                spin_unlock(&sb_lock);
-                down_read(&sb->s_umount);
-                while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-                        printk(KERN_WARNING "Emergency Thaw on %s\n",
-                               bdevname(sb->s_bdev, b));
-                up_read(&sb->s_umount);
-                spin_lock(&sb_lock);
-                if (__put_super_and_need_restart(sb))
-                        goto restart;
-        }
-        spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+        iterate_supers(do_thaw_one, NULL);
         kfree(work);
         printk(KERN_WARNING "Emergency Thaw complete\n");
 }
@@ -2893,7 +2885,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 
         /*
          * The page straddles i_size. It must be zeroed out on each and every
-         * writepage invokation because it may be mmapped. "A file is mapped
+         * writepage invocation because it may be mmapped. "A file is mapped
          * in multiples of the page size. For a file that is not a multiple of
          * the page size, the remaining memory is zeroed when mapped, and
          * writes to that region are not written out to the file."
@@ -3265,7 +3257,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-        struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
         if (ret) {
                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
                 get_cpu_var(bh_accounting).nr++;
@@ -3352,15 +3344,6 @@ int bh_submit_read(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(bh_submit_read);
 
-static void
-init_buffer_head(void *data)
-{
-        struct buffer_head *bh = data;
-
-        memset(bh, 0, sizeof(*bh));
-        INIT_LIST_HEAD(&bh->b_assoc_buffers);
-}
-
 void __init buffer_init(void)
 {
         int nrpages;
@@ -3369,7 +3352,7 @@ void __init buffer_init(void)
                         sizeof(struct buffer_head), 0,
                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                 SLAB_MEM_SPREAD),
-                                init_buffer_head);
+                                NULL);
 
         /*
          * Limit the bh occupancy to 10% of ZONE_NORMAL