Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 49 insertions(+), 17 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 13edf7ad3ff1..aed297739eb0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,7 +360,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
	char b[BDEVNAME_SIZE];
	unsigned long flags;
@@ -438,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
	set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+void mark_buffer_async_write_endio(struct buffer_head *bh,
+				   bh_end_io_t *handler)
 {
-	bh->b_end_io = end_buffer_async_write;
+	bh->b_end_io = handler;
	set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+	mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
 
 
@@ -547,7 +553,7 @@ repeat:
	return err;
 }
 
-void do_thaw_all(unsigned long unused)
+void do_thaw_all(struct work_struct *work)
 {
	struct super_block *sb;
	char b[BDEVNAME_SIZE];
@@ -567,6 +573,7 @@ restart:
		goto restart;
	}
	spin_unlock(&sb_lock);
+	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
 }
 
@@ -577,7 +584,13 @@ restart:
  */
 void emergency_thaw_all(void)
 {
-	pdflush_operation(do_thaw_all, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_thaw_all);
+		schedule_work(work);
+	}
 }
 
 /**
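The two hunks above are the stock pattern for replacing a pdflush_operation() call with the workqueue API: the work item is allocated with GFP_ATOMIC (the emergency-thaw path may be entered from contexts where a sleeping allocation is unsafe), queued with schedule_work(), and freed by do_thaw_all() itself once it has run, which is what the added kfree(work) is for. A minimal sketch of the same fire-and-forget pattern, using hypothetical my_* names:

	/* sketch only: my_work_fn and my_trigger are hypothetical names */
	static void my_work_fn(struct work_struct *work)
	{
		/* ... perform the deferred operation ... */
		kfree(work);		/* the handler owns the work item */
	}

	static void my_trigger(void)
	{
		struct work_struct *work;

		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return;		/* best effort: drop silently on OOM */
		INIT_WORK(work, my_work_fn);
		schedule_work(work);
	}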
@@ -1608,7 +1621,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-			get_block_t *get_block, struct writeback_control *wbc)
+			get_block_t *get_block, struct writeback_control *wbc,
+			bh_end_io_t *handler)
 {
	int err;
	sector_t block;
@@ -1693,7 +1707,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
		} else {
			unlock_buffer(bh);
		}
@@ -1746,7 +1760,7 @@ recover:
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
		} else {
			/*
			 * The buffer may have been set dirty during
@@ -2383,7 +2397,8 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* page got truncated out from underneath us */
-		goto out_unlock;
+		unlock_page(page);
+		goto out;
	}
 
	/* page is wholly or partially inside EOF */
@@ -2397,14 +2412,15 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		ret = block_commit_write(page, 0, end);
 
	if (unlikely(ret)) {
+		unlock_page(page);
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
-	}
+	} else
+		ret = VM_FAULT_LOCKED;
 
-out_unlock:
-	unlock_page(page);
+out:
	return ret;
 }
 
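After this hunk, block_page_mkwrite() returns VM_FAULT_LOCKED on success and hands the page back to the fault handler still locked; on any failure it unlocks the page itself before translating the error into VM_FAULT_OOM or VM_FAULT_SIGBUS. A filesystem with no extra work to do can still forward the call and propagate the return value unchanged, as in this sketch (myfs_* names are hypothetical):

	/* sketch only: a trivial ->page_mkwrite built on the new contract */
	static int myfs_page_mkwrite(struct vm_area_struct *vma,
				     struct vm_fault *vmf)
	{
		/* success: page comes back locked, ret == VM_FAULT_LOCKED;
		 * failure: page is already unlocked, ret is a VM_FAULT_*
		 * error code */
		return block_page_mkwrite(vma, vmf, myfs_get_block);
	}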
@@ -2672,7 +2688,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
-		ret = __block_write_full_page(inode, page, get_block, wbc);
+		ret = __block_write_full_page(inode, page, get_block, wbc,
+					      end_buffer_async_write);
	return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2830,9 +2847,10 @@ out:
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * this form passes in the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-			struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc, bh_end_io_t *handler)
 {
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
@@ -2841,7 +2859,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
	/* Is the page fully inside i_size? */
	if (page->index < end_index)
-		return __block_write_full_page(inode, page, get_block, wbc);
+		return __block_write_full_page(inode, page, get_block, wbc,
+					       handler);
 
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2864,9 +2883,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-	return __block_write_full_page(inode, page, get_block, wbc);
+	return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc)
+{
+	return block_write_full_page_endio(page, get_block, wbc,
+					   end_buffer_async_write);
 }
 
+
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			get_block_t *get_block)
 {
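block_write_full_page_endio() is the payoff of the series: a filesystem can keep the whole generic buffer-head writeback path while substituting its own per-buffer completion handler, which is also why end_buffer_async_write() loses its static and gains an EXPORT_SYMBOL below, so a custom handler can chain to it. A hedged sketch of a caller (all myfs_* names hypothetical):

	/* sketch only: custom end_io that chains to the generic handler */
	static void myfs_end_buffer_write(struct buffer_head *bh, int uptodate)
	{
		/* ... filesystem-specific accounting for this buffer ... */

		/* then finish the buffer exactly as the generic path would */
		end_buffer_async_write(bh, uptodate);
	}

	static int myfs_writepage(struct page *page,
				  struct writeback_control *wbc)
	{
		return block_write_full_page_endio(page, myfs_get_block, wbc,
						   myfs_end_buffer_write);
	}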
@@ -3335,9 +3365,11 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(block_write_full_page_endio);
 EXPORT_SYMBOL(cont_write_begin);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
+EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_cont_expand_simple);