Diffstat (limited to 'fs/buffer.c')
 -rw-r--r--  fs/buffer.c  45
 1 file changed, 8 insertions(+), 37 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76e2caf..42534f67d71b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
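
With the queue-plugging removal, the only job left for the wait_on_bit()/wait_on_bit_lock() action callback is to put the task to sleep, hence the rename from sync_buffer() to sleep_on_buffer() and the bare io_schedule(). The contract is easy to miss in the diff: the callback is invoked each time the waiter is about to block on the bit. A minimal userspace model of that contract follows; the busy-wait loop and all names are simplifications for illustration, not the kernel implementation.

#include <stdio.h>

struct buffer_head_model {
	unsigned long b_state;
};

#define BH_LOCK_BIT 0

/* The action callback: invoked each time the waiter must block.
 * In the kernel this is now just io_schedule(); here we report. */
static int sleep_on_buffer_model(void *word)
{
	printf("waiter blocks on word %p\n", word);
	return 0;	/* 0 = keep waiting; nonzero = abort the wait */
}

/* Simplified wait loop: invoke the action while the bit stays set. */
static int wait_on_bit_model(unsigned long *word, int bit,
			     int (*action)(void *))
{
	while (*word & (1UL << bit)) {
		int ret = action(word);
		if (ret)
			return ret;
		*word &= ~(1UL << bit);	/* pretend I/O completion cleared it */
	}
	return 0;
}

int main(void)
{
	struct buffer_head_model bh = { .b_state = 1UL << BH_LOCK_BIT };

	if (wait_on_bit_model(&bh.b_state, BH_LOCK_BIT,
			      sleep_on_buffer_model) == 0)
		printf("buffer unlocked\n");
	return 0;
}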
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -775,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+				write_dirty_buffer(bh, WRITE_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
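
The dropped prev_mapping logic was the old scheme's way of getting queued writes moving: whenever the loop crossed from one address_space to the next, it kicked the previous mapping's request queue so I/O would start before the wait loop. With on-stack plugging, the block layer flushes the submitter's plug automatically, so the bookkeeping can go. A standalone sketch of the removed pattern, using hypothetical stand-ins for the mapping and queue types:

#include <stdio.h>

/* Hypothetical stand-in for an address_space with a request queue. */
struct mapping { const char *name; };

static void kick_queue(struct mapping *m)
{
	printf("unplug request queue for %s\n", m->name);
}

static void submit_all(struct mapping **items, int n)
{
	struct mapping *prev = NULL;
	int i;

	for (i = 0; i < n; i++) {
		printf("queue write for %s\n", items[i]->name);
		/* the removed pattern: kick the previous queue when the
		 * mapping changes, so its I/O starts early; the last
		 * mapping's queue was kicked by the wait path instead */
		if (prev && prev != items[i])
			kick_queue(prev);
		prev = items[i];
	}
}

int main(void)
{
	struct mapping a = { "mapping A" }, b = { "mapping B" };
	struct mapping *list[] = { &a, &a, &b };

	submit_all(list, 3);
	return 0;
}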
@@ -1614,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the queue before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 		get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-			WRITE_SYNC_PLUG : WRITE);
+			WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
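
The write_op selection itself is unchanged in shape: data-integrity writeback (WB_SYNC_ALL) still tags its writes as synchronous so the I/O scheduler can prioritize them; only the now-meaningless PLUG variant is gone. A self-contained sketch of the selection, with illustrative flag values rather than the block layer's real definitions:

#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

#define WRITE      0x00	/* illustrative values only */
#define WRITE_SYNC 0x01	/* request is latency-sensitive */

static int pick_write_op(enum sync_mode mode)
{
	/* WB_SYNC_ALL means the caller will wait on these writes,
	 * so mark them synchronous for the I/O scheduler. */
	return mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE;
}

int main(void)
{
	printf("data integrity writeback -> op %#x\n",
	       pick_write_op(WB_SYNC_ALL));
	printf("background writeback     -> op %#x\n",
	       pick_write_op(WB_SYNC_NONE));
	return 0;
}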
@@ -3138,17 +3120,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left. But distributions are
  * still running obsolete flush daemons, so we terminate them here.
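
With no unplug-on-wait model left, block_sync_page() loses its reason to exist: a waiter no longer pokes the backing device through ->sync_page(), because the block layer flushes the submitting task's on-stack plug when that task blocks. A userspace cartoon of the replacement model follows; blk_start_plug()/blk_finish_plug() are the real kernel entry points, but the types and functions below are made up to show the shape.

#include <stdio.h>

/* Hypothetical stand-in for the on-stack plug that replaces the old
 * unplug-on-wait model; only the shape matches the kernel API. */
struct plug { int queued; };

static void start_plug(struct plug *p)  { p->queued = 0; }
static void queue_io(struct plug *p)    { p->queued++; }

static void finish_plug(struct plug *p)
{
	printf("flushing %d queued requests\n", p->queued);
	p->queued = 0;
}

int main(void)
{
	struct plug p;

	start_plug(&p);		/* batching begins on the submitter's stack */
	queue_io(&p);
	queue_io(&p);
	finish_plug(&p);	/* submission point: no waiter-side kick needed */
	return 0;
}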