author     Thomas Gleixner <tglx@linutronix.de>    2009-05-20 03:02:28 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2009-05-20 03:02:28 -0400
commit     521c180874dae86f675d23c4eade4dba8b1f2cc8
tree       7509303da3a9a1b40a26f6811f321c89cd31737b /fs/buffer.c
parent     f1a11e0576c7a73d759d05d776692b2b2d37172b
parent     64d1304a64477629cb16b75491a77bafe6f86963
Merge branch 'core/urgent' into core/futexes

Merge reason: this branch was on a pre-rc1 base, merge it up to -rc6+
to get the latest upstream fixes.
Conflicts:
kernel/futex.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'fs/buffer.c')
 -rw-r--r--  fs/buffer.c | 101
 1 file changed, 78 insertions(+), 23 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d55a896ff78..aed297739eb0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,7 +360,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
 	char b[BDEVNAME_SIZE];
 	unsigned long flags;
@@ -438,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
 	set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+void mark_buffer_async_write_endio(struct buffer_head *bh,
+				   bh_end_io_t *handler)
 {
-	bh->b_end_io = end_buffer_async_write;
+	bh->b_end_io = handler;
 	set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+	mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
 
 
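[Note: with end_buffer_async_write() now non-static and exported (see the EXPORT_SYMBOL hunk at the end of this patch), a filesystem can hang its own completion hook on async buffer writeback and still reuse the generic tail. A minimal sketch with hypothetical myfs_* names; the per-inode error counter is illustrative and not part of this patch:]

/* Hypothetical end_io handler: do private accounting, then delegate
 * buffer/page state handling to the generic helper exported here. */
static void myfs_end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	struct myfs_inode_info *mi = MYFS_I(bh->b_page->mapping->host);

	if (!uptodate)
		atomic_inc(&mi->write_errors);	/* illustrative bookkeeping */

	/* Generic completion: clears the async_write state and ends page
	 * writeback once the last buffer on the page finishes. */
	end_buffer_async_write(bh, uptodate);
}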
@@ -547,7 +553,7 @@ repeat:
 	return err;
 }
 
-void do_thaw_all(unsigned long unused)
+void do_thaw_all(struct work_struct *work)
 {
 	struct super_block *sb;
 	char b[BDEVNAME_SIZE];
@@ -567,6 +573,7 @@ restart:
 		goto restart;
 	}
 	spin_unlock(&sb_lock);
+	kfree(work);
 	printk(KERN_WARNING "Emergency Thaw complete\n");
 }
 
@@ -577,7 +584,13 @@ restart:
  */
 void emergency_thaw_all(void)
 {
-	pdflush_operation(do_thaw_all, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_thaw_all);
+		schedule_work(work);
+	}
 }
 
 /**
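[Note: the do_thaw_all() conversion replaces pdflush_operation() with a fire-and-forget work item that frees itself; since the submitter never waits, the handler must be the one to kfree() the work_struct. A minimal sketch of the same idiom, with hypothetical names:]

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical self-freeing work item, mirroring emergency_thaw_all(). */
static void my_async_op(struct work_struct *work)
{
	/* ... perform the deferred operation ... */
	kfree(work);	/* last action: nobody else holds a reference */
}

static void kick_my_async_op(void)
{
	struct work_struct *work;

	/* GFP_ATOMIC: the caller may not be allowed to sleep (emergency
	 * thaw is triggered from sysrq). On allocation failure the
	 * operation is simply dropped, as in the patch above. */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, my_async_op);
		schedule_work(work);
	}
}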
@@ -737,7 +750,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping;
+	struct address_space *mapping, *prev_mapping = NULL;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,7 +775,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			 * contents - it is a noop if I/O is still in
 			 * flight on potentially older contents.
 			 */
-			ll_rw_block(SWRITE_SYNC, 1, &bh);
+			ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+			/*
+			 * Kick off IO for the previous mapping. Note
+			 * that we will not run the very last mapping,
+			 * wait_on_buffer() will do that for us
+			 * through sync_buffer().
+			 */
+			if (prev_mapping && prev_mapping != mapping)
+				blk_run_address_space(prev_mapping);
+			prev_mapping = mapping;
+
 			brelse(bh);
 			spin_lock(lock);
 		}
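[Note: SWRITE_SYNC_PLUG submits a locked, synchronous buffer write without unplugging the queue, so the caller decides when the device starts working; batching submissions and kicking the queue once amortizes the unplug. A sketch of the pattern using only calls that appear in this patch; bhs[], nr and mapping are placeholders:]

/* Sketch: batch several synchronous buffer writes, then unplug once. */
static void write_batch_and_kick(struct buffer_head **bhs, int nr,
				 struct address_space *mapping)
{
	int i;

	for (i = 0; i < nr; i++)
		ll_rw_block(SWRITE_SYNC_PLUG, 1, &bhs[i]); /* queue stays plugged */

	blk_run_address_space(mapping);	/* one unplug for the whole batch */

	for (i = 0; i < nr; i++)
		wait_on_buffer(bhs[i]);
}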
@@ -1585,9 +1609,20 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * locked buffer. This only can happen if someone has written the buffer
  * directly, with submit_bh(). At the address_space level PageWriteback
  * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed to the queue before the higher-level caller actually
+ * waits for the writes to be completed. The various wait functions,
+ * such as wait_on_writeback_range() will ultimately call sync_page()
+ * which will ultimately call blk_run_backing_dev(), which will end up
+ * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-			get_block_t *get_block, struct writeback_control *wbc)
+			get_block_t *get_block, struct writeback_control *wbc,
+			bh_end_io_t *handler)
 {
 	int err;
 	sector_t block;
@@ -1595,7 +1630,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
-	int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
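[Note: the write_op chosen here flows into submit_bh() for every buffer marked async_write later in the function, so WB_SYNC_ALL writeback issues bios flagged synchronous for the IO scheduler without an unplug. A sketch of that submission loop as it stands in this era of fs/buffer.c; the context is not part of this hunk:]

	/* Later in __block_write_full_page() (unchanged context): each
	 * buffer marked for async write is submitted with write_op. */
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);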
@@ -1671,7 +1707,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
 		} else {
 			unlock_buffer(bh);
 		}
@@ -1724,7 +1760,7 @@ recover:
 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
 		    !buffer_delay(bh)) {
 			lock_buffer(bh);
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
 		} else {
 			/*
 			 * The buffer may have been set dirty during
@@ -2361,7 +2397,8 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
 		/* page got truncated out from underneath us */
-		goto out_unlock;
+		unlock_page(page);
+		goto out;
 	}
 
 	/* page is wholly or partially inside EOF */
@@ -2375,14 +2412,15 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	ret = block_commit_write(page, 0, end);
 
 	if (unlikely(ret)) {
+		unlock_page(page);
 		if (ret == -ENOMEM)
 			ret = VM_FAULT_OOM;
 		else /* -ENOSPC, -EIO, etc */
 			ret = VM_FAULT_SIGBUS;
-	}
+	} else
+		ret = VM_FAULT_LOCKED;
 
-out_unlock:
-	unlock_page(page);
+out:
 	return ret;
 }
 
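[Note: after this change block_page_mkwrite() returns with the page still locked on success, signalled by VM_FAULT_LOCKED, closing the window in which the page could be truncated between the unlock and the pte update; the VM core now does the final unlock. A filesystem wires it up unchanged; a sketch with hypothetical myfs_* names, where myfs_get_block is a placeholder get_block_t:]

/* Hypothetical ->page_mkwrite: simply forward to the generic helper;
 * the new VM_FAULT_LOCKED contract is handled by the VM core. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};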
@@ -2650,7 +2688,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
-		ret = __block_write_full_page(inode, page, get_block, wbc);
+		ret = __block_write_full_page(inode, page, get_block, wbc,
+					      end_buffer_async_write);
 	return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2808,9 +2847,10 @@ out:
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * this form passes in the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-			struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc, bh_end_io_t *handler)
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
@@ -2819,7 +2859,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
-		return __block_write_full_page(inode, page, get_block, wbc);
+		return __block_write_full_page(inode, page, get_block, wbc,
+					       handler);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
 	offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2842,9 +2883,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * writes to that region are not written out to the file."
 	 */
 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-	return __block_write_full_page(inode, page, get_block, wbc);
+	return __block_write_full_page(inode, page, get_block, wbc, handler);
 }
 
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc)
+{
+	return block_write_full_page_endio(page, get_block, wbc,
+					   end_buffer_async_write);
+}
+
+
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 			    get_block_t *get_block)
 {
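[Note: block_write_full_page_endio() is the new entry point for filesystems that need to observe write completion per buffer; block_write_full_page() keeps the old behavior by passing end_buffer_async_write. A sketch of a ->writepage using it, reusing the hypothetical handler and get_block from the earlier notes:]

/* Hypothetical ->writepage: identical to the generic path except that
 * completion runs through myfs_end_buffer_async_write (sketched above). */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, myfs_get_block, wbc,
					   myfs_end_buffer_async_write);
}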
@@ -2957,12 +3009,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC)
+		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+		    rw == SWRITE_SYNC_PLUG) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
@@ -2998,7 +3051,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -3312,9 +3365,11 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(block_write_full_page_endio);
 EXPORT_SYMBOL(cont_write_begin);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
+EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_cont_expand_simple);