author		Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:52:07 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:52:07 -0500
commit		7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree		33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /fs/xfs
parent		73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So let's kill off the old plugging along with aops->sync_page().
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
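
For context, a minimal sketch of what the explicit on-stack plugging referred to above looks like from a caller's point of view (illustrative only, not part of this diff; the helper names example_submit_batch() and example_wait_for_io() are made up, while blk_start_plug()/blk_finish_plug()/blk_flush_plug() are the new API). Requests submitted while a blk_plug is active on the task's stack are batched and handed to the driver when the plug is finished, and a task about to sleep on its own I/O can call blk_flush_plug(current), as the xfs_buf.c hunks below now do. io_schedule() also flushes the blocking task's plug, which is why xfs_buf_wait_unpin() can drop its explicit unplug entirely.

/*
 * Illustrative sketch only -- not part of this commit.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/completion.h>

static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* plug lives on this stack frame */

	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* collected on current->plug */

	blk_finish_plug(&plug);			/* batch is issued to the driver */
}

static void example_wait_for_io(struct completion *done)
{
	/*
	 * About to sleep waiting for our own I/O: push out anything
	 * still sitting on this task's plug first.
	 */
	blk_flush_plug(current);
	wait_for_completion(done);
}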
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	1
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	13
2 files changed, 5 insertions, 9 deletions
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index ec7bbb5645b6..83c1c20d145a 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1495,7 +1495,6 @@ const struct address_space_operations xfs_address_space_operations = {
 	.readpages		= xfs_vm_readpages,
 	.writepage		= xfs_vm_writepage,
 	.writepages		= xfs_vm_writepages,
-	.sync_page		= block_sync_page,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
 	.write_begin		= xfs_vm_write_begin,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ac1c7e8378dd..4f8f53c4d42c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -991,7 +991,7 @@ xfs_buf_lock(
 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
 		xfs_log_force(bp->b_target->bt_mount, 0);
 	if (atomic_read(&bp->b_io_remaining))
-		blk_run_address_space(bp->b_target->bt_mapping);
+		blk_flush_plug(current);
 	down(&bp->b_sema);
 	XB_SET_OWNER(bp);
 
@@ -1035,9 +1035,7 @@ xfs_buf_wait_unpin(
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (atomic_read(&bp->b_pin_count) == 0)
 			break;
-		if (atomic_read(&bp->b_io_remaining))
-			blk_run_address_space(bp->b_target->bt_mapping);
-		schedule();
+		io_schedule();
 	}
 	remove_wait_queue(&bp->b_waiters, &wait);
 	set_current_state(TASK_RUNNING);
@@ -1443,7 +1441,7 @@ xfs_buf_iowait(
 	trace_xfs_buf_iowait(bp, _RET_IP_);
 
 	if (atomic_read(&bp->b_io_remaining))
-		blk_run_address_space(bp->b_target->bt_mapping);
+		blk_flush_plug(current);
 	wait_for_completion(&bp->b_iowait);
 
 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1667,7 +1665,6 @@ xfs_mapping_buftarg(
 	struct inode		*inode;
 	struct address_space	*mapping;
 	static const struct address_space_operations mapping_aops = {
-		.sync_page = block_sync_page,
 		.migratepage = fail_migrate_page,
 	};
 
@@ -1948,7 +1945,7 @@ xfsbufd(
 			count++;
 		}
 		if (count)
-			blk_run_address_space(target->bt_mapping);
+			blk_flush_plug(current);
 
 	} while (!kthread_should_stop());
 
@@ -1996,7 +1993,7 @@ xfs_flush_buftarg(
 
 	if (wait) {
 		/* Expedite and wait for IO to complete. */
-		blk_run_address_space(target->bt_mapping);
+		blk_flush_plug(current);
 		while (!list_empty(&wait_list)) {
 			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 