author     Ingo Molnar <mingo@elte.hu>    2009-05-11 08:44:27 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-05-11 08:44:31 -0400
commit     41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51
tree       51c50bcb67a5039448ddfa1869d7948cab1217e9 /fs/buffer.c
parent     19c1a6f5764d787113fa323ffb18be7991208f82
parent     091bf7624d1c90cec9e578a18529f615213ff847
Merge commit 'v2.6.30-rc5' into core/iommu
Merge reason: core/iommu was on an .30-rc1 base,
update it to .30-rc5 to refresh.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/buffer.c')

 fs/buffer.c | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 61 insertions(+), 18 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6e35762b6169..aed297739eb0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,7 +360,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
 	char b[BDEVNAME_SIZE];
 	unsigned long flags;
@@ -438,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
 	set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+void mark_buffer_async_write_endio(struct buffer_head *bh,
+				   bh_end_io_t *handler)
 {
-	bh->b_end_io = end_buffer_async_write;
+	bh->b_end_io = handler;
 	set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+	mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
 
 
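The new mark_buffer_async_write_endio() lets a caller attach its own I/O
completion handler instead of the default end_buffer_async_write(). As a
minimal sketch of how a filesystem might use it (the myfs_* names are
hypothetical, not part of this patch):

	/* Hypothetical handler: do filesystem-specific bookkeeping,
	 * then fall through to the stock completion logic. */
	static void myfs_end_write(struct buffer_head *bh, int uptodate)
	{
		if (!uptodate)
			myfs_note_write_error(bh);	/* hypothetical helper */
		end_buffer_async_write(bh, uptodate);
	}

	...
	lock_buffer(bh);
	mark_buffer_async_write_endio(bh, myfs_end_write);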
@@ -547,7 +553,7 @@ repeat:
 	return err;
 }
 
-void do_thaw_all(unsigned long unused)
+void do_thaw_all(struct work_struct *work)
 {
 	struct super_block *sb;
 	char b[BDEVNAME_SIZE];
@@ -567,6 +573,7 @@ restart:
 		goto restart;
 	}
 	spin_unlock(&sb_lock);
+	kfree(work);
 	printk(KERN_WARNING "Emergency Thaw complete\n");
 }
 
@@ -577,7 +584,13 @@ restart:
  */
 void emergency_thaw_all(void)
 {
-	pdflush_operation(do_thaw_all, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_thaw_all);
+		schedule_work(work);
+	}
 }
 
 /**
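These three hunks convert the emergency thaw from a pdflush operation to a
workqueue item. Because the work_struct is kmalloc()ed at submission time,
the work function must free it once it has run, which is what the added
kfree(work) in do_thaw_all() does. The same fire-and-forget pattern in
isolation (my_work_fn and kick_off_work are illustrative names only):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	static void my_work_fn(struct work_struct *work)
	{
		/* ... do the deferred work ... */
		kfree(work);	/* the work item owns its own allocation */
	}

	static void kick_off_work(void)
	{
		struct work_struct *work = kmalloc(sizeof(*work), GFP_ATOMIC);

		if (work) {
			INIT_WORK(work, my_work_fn);
			schedule_work(work);
		}
	}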
@@ -1596,9 +1609,20 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * locked buffer. This only can happen if someone has written the buffer
  * directly, with submit_bh(). At the address_space level PageWriteback
  * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed out before the higher-level caller actually
+ * waits for the writes to be completed. The various wait functions,
+ * such as wait_on_writeback_range() will ultimately call sync_page()
+ * which will ultimately call blk_run_backing_dev(), which will end up
+ * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-		get_block_t *get_block, struct writeback_control *wbc)
+		get_block_t *get_block, struct writeback_control *wbc,
+		bh_end_io_t *handler)
 {
 	int err;
 	sector_t block;
@@ -1606,7 +1630,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
-	int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
@@ -1682,7 +1707,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
 		} else {
 			unlock_buffer(bh);
 		}
@@ -1735,7 +1760,7 @@ recover:
 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
 		    !buffer_delay(bh)) {
 			lock_buffer(bh);
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
 		} else {
 			/*
 			 * The buffer may have been set dirty during
@@ -2372,7 +2397,8 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
 		/* page got truncated out from underneath us */
-		goto out_unlock;
+		unlock_page(page);
+		goto out;
 	}
 
 	/* page is wholly or partially inside EOF */
@@ -2386,14 +2412,15 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	ret = block_commit_write(page, 0, end);
 
 	if (unlikely(ret)) {
+		unlock_page(page);
 		if (ret == -ENOMEM)
 			ret = VM_FAULT_OOM;
 		else /* -ENOSPC, -EIO, etc */
 			ret = VM_FAULT_SIGBUS;
-	}
+	} else
+		ret = VM_FAULT_LOCKED;
 
-out_unlock:
-	unlock_page(page);
+out:
 	return ret;
 }
 
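With this change block_page_mkwrite() returns VM_FAULT_LOCKED on success,
i.e. the page is handed back to the fault path still locked, and the page is
unlocked only on the error paths. A sketch of how a filesystem hook sits on
top of it after this change (myfs_get_block is a hypothetical get_block_t,
not part of this patch):

	/* Hypothetical ->page_mkwrite: on success the VM gets the page
	 * back locked (VM_FAULT_LOCKED), as block_page_mkwrite() now
	 * arranges. */
	static int myfs_page_mkwrite(struct vm_area_struct *vma,
				     struct vm_fault *vmf)
	{
		return block_page_mkwrite(vma, vmf, myfs_get_block);
	}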
@@ -2661,7 +2688,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
-		ret = __block_write_full_page(inode, page, get_block, wbc);
+		ret = __block_write_full_page(inode, page, get_block, wbc,
+					      end_buffer_async_write);
 	return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2819,9 +2847,10 @@ out:
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * this form passes in the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-			struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc, bh_end_io_t *handler)
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
@@ -2830,7 +2859,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
-		return __block_write_full_page(inode, page, get_block, wbc);
+		return __block_write_full_page(inode, page, get_block, wbc,
+					       handler);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
 	offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2853,9 +2883,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * writes to that region are not written out to the file."
 	 */
 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-	return __block_write_full_page(inode, page, get_block, wbc);
+	return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc)
+{
+	return block_write_full_page_endio(page, get_block, wbc,
+					   end_buffer_async_write);
 }
 
+
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 		get_block_t *get_block)
 {
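block_write_full_page() is now a thin wrapper around
block_write_full_page_endio(), which takes the completion handler
explicitly; together with the newly exported end_buffer_async_write(), this
lets a filesystem run its own code as each buffer's write completes. A
minimal sketch of a ->writepage built on the new entry point (the myfs_*
names are illustrative, not from this patch):

	/* Hypothetical completion handler layered on the stock one. */
	static void myfs_end_buffer_write(struct buffer_head *bh, int uptodate)
	{
		/* filesystem-specific completion work would go here */
		end_buffer_async_write(bh, uptodate);
	}

	static int myfs_writepage(struct page *page,
				  struct writeback_control *wbc)
	{
		return block_write_full_page_endio(page, myfs_get_block, wbc,
						   myfs_end_buffer_write);
	}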
@@ -3324,9 +3365,11 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(block_write_full_page_endio);
 EXPORT_SYMBOL(cont_write_begin);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
+EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_cont_expand_simple);