Diffstat (limited to 'fs/buffer.c')

 fs/buffer.c | 53
 1 file changed, 15 insertions(+), 38 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76e2caf..a08bb8e61c6f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
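Both the lock and wait paths now pass the same trivial action to the bit-wait machinery. Since the submitting side is now responsible for unplugging, the waiter no longer needs to find and kick the backing device; it only sleeps until the completion path clears BH_Lock and wakes it. A minimal sketch of the wait_on_bit() action contract this relies on, assuming the kernel-internal API of this era (wait_for_buffer() is a hypothetical caller for illustration):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/buffer_head.h>

/* Action callback for wait_on_bit(): invoked each time the waiter must
 * block. Returning 0 tells the wait loop to re-check the bit; a nonzero
 * return aborts the wait (used by interruptible variants). */
static int sleep_on_bit(void *word)
{
	io_schedule();		/* sleep until an I/O completion wakes us */
	return 0;
}

/* Hypothetical caller: block until BH_Lock is clear on this buffer. */
static void wait_for_buffer(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_bit, TASK_UNINTERRUPTIBLE);
}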
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
+	struct blk_plug plug;
 
 	INIT_LIST_HEAD(&tmp);
+	blk_start_plug(&plug);
 
 	spin_lock(lock);
 	while (!list_empty(list)) {
@@ -775,7 +769,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+				write_dirty_buffer(bh, WRITE_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -783,16 +777,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
 		}
 	}
 
+	spin_unlock(lock);
+	blk_finish_plug(&plug);
+	spin_lock(lock);
+
 	while (!list_empty(&tmp)) {
 		bh = BH_ENTRY(tmp.prev);
 		get_bh(bh);
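The deleted blk_run_address_space() calls are exactly what the on-stack plug replaces: rather than kicking each previous mapping's queue by hand, the submitter batches requests on its task-local plug list and flushes them once. A minimal sketch of the pattern, using the blk_start_plug()/blk_finish_plug() API this series introduces (the batch-writer helper itself is illustrative):

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

/* Illustrative batch writer: requests issued between blk_start_plug()
 * and blk_finish_plug() collect on the calling task's plug list, giving
 * the block layer a chance to merge them before they reach the device. */
static void write_buffers_plugged(struct buffer_head **bhs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		write_dirty_buffer(bhs[i], WRITE_SYNC);
	blk_finish_plug(&plug);		/* flush the batch to the device(s) */
}

This is also why the hunk above drops the lock around blk_finish_plug(): flushing the plug list submits I/O and may sleep, which is not allowed under a spinlock.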
@@ -1144,7 +1138,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * mapping->tree_lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
@@ -1614,14 +1608,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the queue before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1622,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-			WRITE_SYNC_PLUG : WRITE);
+			WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
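WRITE_SYNC still tags the request as synchronous for the I/O scheduler; the only difference from WRITE_SYNC_PLUG is that the flag no longer has to say anything about unplugging. A hedged sketch of how the chosen flag reaches the block layer, assuming the submit_bh() and end_buffer_write_sync() APIs of this era (the wrapper itself is illustrative):

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>

/* Illustrative wrapper: derive the write flag from the writeback mode
 * and hand the locked, dirty buffer to the block layer. */
static void submit_buffer_for_writeback(struct buffer_head *bh,
					struct writeback_control *wbc)
{
	int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);

	bh->b_end_io = end_buffer_write_sync;	/* unlocks and releases bh */
	get_bh(bh);			/* reference dropped on completion */
	submit_bh(write_op, bh);	/* queued on the caller's plug, if any */
}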
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
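block_sync_page() existed so a waiter could unplug the queue behind a page's backing device through the address_space_operations ->sync_page hook. With submitters flushing their own plugs, the hook has no replacement and is dropped from filesystems elsewhere in this series. A hedged before/after sketch (def_blk_aops in fs/block_dev.c was one real user of block_sync_page; the trimmed initializers are illustrative):

/* Before: aops advertised a callback for waiters to kick the queue. */
static const struct address_space_operations blk_aops_before = {
	.readpage	= blkdev_readpage,
	.writepage	= blkdev_writepage,
	.sync_page	= block_sync_page,	/* removed by this series */
};

/* After: no .sync_page; waiters just sleep, submitters unplug. */
static const struct address_space_operations blk_aops_after = {
	.readpage	= blkdev_readpage,
	.writepage	= blkdev_writepage,
};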