author	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-24 13:16:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-24 13:16:26 -0400
commit	6c5103890057b1bb781b26b7aae38d33e4c517d8 (patch)
tree	e6e57961dcddcb5841acb34956e70b9dc696a880 /fs
parent	3dab04e6978e358ad2307bca563fabd6c5d2c58b (diff)
parent	9d2e157d970a73b3f270b631828e03eb452d525e (diff)
Merge branch 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block: (65 commits)
  Documentation/iostats.txt: bit-size reference etc.
  cfq-iosched: removing unnecessary think time checking
  cfq-iosched: Don't clear queue stats when preempt.
  blk-throttle: Reset group slice when limits are changed
  blk-cgroup: Only give unaccounted_time under debug
  cfq-iosched: Don't set active queue in preempt
  block: fix non-atomic access to genhd inflight structures
  block: attempt to merge with existing requests on plug flush
  block: NULL dereference on error path in __blkdev_get()
  cfq-iosched: Don't update group weights when on service tree
  fs: assign sb->s_bdi to default_backing_dev_info if the bdi is going away
  block: Require subsystems to explicitly allocate bio_set integrity mempool
  jbd2: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  jbd: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  fs: make fsync_buffers_list() plug
  mm: make generic_writepages() use plugging
  blk-cgroup: Add unaccounted time to timeslice_used.
  block: fixup plugging stubs for !CONFIG_BLOCK
  block: remove obsolete comments for blkdev_issue_zeroout.
  blktrace: Use rq->cmd_flags directly in blk_add_trace_rq.
  ...

Fix up conflicts in fs/{aio.c,super.c}
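The common thread in the fs/ changes below is the removal of the old per-queue unplug machinery (block_sync_page, blk_run_address_space, blk_run_backing_dev, WRITE_SYNC_PLUG) in favour of explicit, on-stack plugging. As a rough sketch of the pattern callers are converted to, mirroring the do_io_submit() hunk in fs/aio.c below; submit_one_request() and struct request_batch are hypothetical stand-ins for whatever per-item submission path a caller has:

	/*
	 * Minimal sketch of explicit plugging, assuming a hypothetical
	 * submit_one_request() helper.  blk_start_plug() queues bios on a
	 * per-task plug list; blk_finish_plug() flushes them, which is what
	 * replaces the old unplug callbacks.
	 */
	#include <linux/blkdev.h>

	static long submit_batch(struct request_batch *batch, long nr)
	{
		struct blk_plug plug;
		long i, ret = 0;

		blk_start_plug(&plug);
		for (i = 0; i < nr; i++) {
			ret = submit_one_request(batch, i);
			if (ret)
				break;
		}
		blk_finish_plug(&plug);

		return i ? i : ret;
	}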
Diffstat (limited to 'fs')
-rw-r--r--	fs/adfs/inode.c	1
-rw-r--r--	fs/affs/file.c	2
-rw-r--r--	fs/aio.c	77
-rw-r--r--	fs/befs/linuxvfs.c	1
-rw-r--r--	fs/bfs/file.c	1
-rw-r--r--	fs/bio-integrity.c	3
-rw-r--r--	fs/bio.c	10
-rw-r--r--	fs/block_dev.c	27
-rw-r--r--	fs/btrfs/disk-io.c	79
-rw-r--r--	fs/btrfs/extent_io.c	2
-rw-r--r--	fs/btrfs/inode.c	1
-rw-r--r--	fs/btrfs/volumes.c	91
-rw-r--r--	fs/buffer.c	51
-rw-r--r--	fs/cifs/file.c	30
-rw-r--r--	fs/direct-io.c	7
-rw-r--r--	fs/efs/inode.c	1
-rw-r--r--	fs/exofs/inode.c	1
-rw-r--r--	fs/ext2/inode.c	2
-rw-r--r--	fs/ext3/inode.c	3
-rw-r--r--	fs/ext4/inode.c	4
-rw-r--r--	fs/ext4/page-io.c	3
-rw-r--r--	fs/fat/inode.c	1
-rw-r--r--	fs/freevxfs/vxfs_subr.c	1
-rw-r--r--	fs/fuse/inode.c	1
-rw-r--r--	fs/gfs2/aops.c	3
-rw-r--r--	fs/gfs2/log.c	4
-rw-r--r--	fs/gfs2/lops.c	12
-rw-r--r--	fs/gfs2/meta_io.c	3
-rw-r--r--	fs/hfs/inode.c	2
-rw-r--r--	fs/hfsplus/inode.c	2
-rw-r--r--	fs/hpfs/file.c	1
-rw-r--r--	fs/isofs/inode.c	1
-rw-r--r--	fs/jbd/commit.c	22
-rw-r--r--	fs/jbd2/commit.c	22
-rw-r--r--	fs/jfs/inode.c	1
-rw-r--r--	fs/jfs/jfs_metapage.c	1
-rw-r--r--	fs/logfs/dev_bdev.c	2
-rw-r--r--	fs/minix/inode.c	1
-rw-r--r--	fs/mpage.c	8
-rw-r--r--	fs/nilfs2/btnode.c	7
-rw-r--r--	fs/nilfs2/gcinode.c	1
-rw-r--r--	fs/nilfs2/inode.c	1
-rw-r--r--	fs/nilfs2/mdt.c	9
-rw-r--r--	fs/nilfs2/page.c	5
-rw-r--r--	fs/nilfs2/page.h	3
-rw-r--r--	fs/nilfs2/segbuf.c	2
-rw-r--r--	fs/ntfs/aops.c	4
-rw-r--r--	fs/ntfs/compress.c	3
-rw-r--r--	fs/ocfs2/aops.c	1
-rw-r--r--	fs/ocfs2/cluster/heartbeat.c	4
-rw-r--r--	fs/omfs/file.c	1
-rw-r--r--	fs/partitions/check.c	3
-rw-r--r--	fs/qnx4/inode.c	1
-rw-r--r--	fs/reiserfs/inode.c	1
-rw-r--r--	fs/super.c	2
-rw-r--r--	fs/sync.c	4
-rw-r--r--	fs/sysv/itree.c	1
-rw-r--r--	fs/ubifs/super.c	1
-rw-r--r--	fs/udf/file.c	1
-rw-r--r--	fs/udf/inode.c	1
-rw-r--r--	fs/ufs/inode.c	1
-rw-r--r--	fs/ufs/truncate.c	2
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	4
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	13
64 files changed, 116 insertions, 445 deletions
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 92444e94f84..d5250c5aae2 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -72,7 +72,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations adfs_aops = {
 	.readpage = adfs_readpage,
 	.writepage = adfs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = adfs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = _adfs_bmap
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0a90dcd46de..acf321b70fc 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations affs_aops = {
 	.readpage = affs_readpage,
 	.writepage = affs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = affs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = _affs_bmap
@@ -786,7 +785,6 @@ out:
 const struct address_space_operations affs_aops_ofs = {
 	.readpage = affs_readpage_ofs,
 	//.writepage = affs_writepage_ofs,
-	//.sync_page = affs_sync_page_ofs,
 	.write_begin = affs_write_begin_ofs,
 	.write_end = affs_write_end_ofs
 };
diff --git a/fs/aio.c b/fs/aio.c
index ebb6a22e4e1..e29ec485af2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -34,8 +34,6 @@
 #include <linux/security.h>
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
-#include <linux/mempool.h>
-#include <linux/hash.h>
 #include <linux/compat.h>
 
 #include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
-#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
-struct aio_batch_entry {
-	struct hlist_node list;
-	struct address_space *mapping;
-};
-mempool_t *abe_pool;
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -86,8 +76,7 @@ static int __init aio_setup(void)
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
-	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
-	BUG_ON(!aio_wq || !abe_pool);
+	BUG_ON(!aio_wq);
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
@@ -1525,57 +1514,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 	return 0;
 }
 
-static void aio_batch_add(struct address_space *mapping,
-			  struct hlist_head *batch_hash)
-{
-	struct aio_batch_entry *abe;
-	struct hlist_node *pos;
-	unsigned bucket;
-
-	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
-	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
-		if (abe->mapping == mapping)
-			return;
-	}
-
-	abe = mempool_alloc(abe_pool, GFP_KERNEL);
-
-	/*
-	 * we should be using igrab here, but
-	 * we don't want to hammer on the global
-	 * inode spinlock just to take an extra
-	 * reference on a file that we must already
-	 * have a reference to.
-	 *
-	 * When we're called, we always have a reference
-	 * on the file, so we must always have a reference
-	 * on the inode, so ihold() is safe here.
-	 */
-	ihold(mapping->host);
-	abe->mapping = mapping;
-	hlist_add_head(&abe->list, &batch_hash[bucket]);
-	return;
-}
-
-static void aio_batch_free(struct hlist_head *batch_hash)
-{
-	struct aio_batch_entry *abe;
-	struct hlist_node *pos, *n;
-	int i;
-
-	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
-		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
-			blk_run_address_space(abe->mapping);
-			iput(abe->mapping->host);
-			hlist_del(&abe->list);
-			mempool_free(abe, abe_pool);
-		}
-	}
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct hlist_head *batch_hash,
-			 bool compat)
+			 struct iocb *iocb, bool compat)
 {
 	struct kiocb *req;
 	struct file *file;
@@ -1666,11 +1606,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		;
 	}
 	spin_unlock_irq(&ctx->ctx_lock);
-	if (req->ki_opcode == IOCB_CMD_PREAD ||
-	    req->ki_opcode == IOCB_CMD_PREADV ||
-	    req->ki_opcode == IOCB_CMD_PWRITE ||
-	    req->ki_opcode == IOCB_CMD_PWRITEV)
-		aio_batch_add(file->f_mapping, batch_hash);
 
 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
@@ -1687,7 +1622,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 	struct kioctx *ctx;
 	long ret = 0;
 	int i;
-	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
+	struct blk_plug plug;
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1704,6 +1639,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 		return -EINVAL;
 	}
 
+	blk_start_plug(&plug);
+
 	/*
 	 * AKPM: should this return a partial result if some of the IOs were
 	 * successfully submitted?
@@ -1722,11 +1659,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
 		if (ret)
 			break;
 	}
-	aio_batch_free(batch_hash);
+	blk_finish_plug(&plug);
 
 	put_ioctx(ctx);
 	return i ? i : ret;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b1d0c794747..06457ed8f3e 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
 
 static const struct address_space_operations befs_aops = {
 	.readpage = befs_readpage,
-	.sync_page = block_sync_page,
 	.bmap = befs_bmap,
 };
 
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index eb67edd0f8e..f20e8a71062 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations bfs_aops = {
 	.readpage = bfs_readpage,
 	.writepage = bfs_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = bfs_write_begin,
 	.write_end = generic_write_end,
 	.bmap = bfs_bmap,
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index e49cce234c6..9c5e6b2cd11 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -761,6 +761,9 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
 {
 	unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
 
+	if (bs->bio_integrity_pool)
+		return 0;
+
 	bs->bio_integrity_pool =
 		mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
 
diff --git a/fs/bio.c b/fs/bio.c
index 4cf2a52fbc5..4d6d4b6c2bf 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -43,7 +43,7 @@ static mempool_t *bio_split_pool __read_mostly;
  * unsigned short
  */
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -1636,9 +1636,6 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (bioset_integrity_create(bs, pool_size))
-		goto bad;
-
 	if (!biovec_create_pools(bs, pool_size))
 		return bs;
 
@@ -1656,12 +1653,10 @@ static void __init biovec_init_slabs(void)
 		int size;
 		struct biovec_slab *bvs = bvec_slabs + i;
 
-#ifndef CONFIG_BLK_DEV_INTEGRITY
 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
 			bvs->slab = NULL;
 			continue;
 		}
-#endif
 
 		size = bvs->nr_vecs * sizeof(struct bio_vec);
 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
@@ -1684,6 +1679,9 @@ static int __init init_bio(void)
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
+	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
+		panic("bio: can't create integrity pool\n");
+
 	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
 						     sizeof(struct bio_pair));
 	if (!bio_split_pool)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 88928701959..7d02afb2b7f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1087,6 +1087,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 	if (!disk)
 		goto out;
 
+	disk_block_events(disk);
 	mutex_lock_nested(&bdev->bd_mutex, for_part);
 	if (!bdev->bd_openers) {
 		bdev->bd_disk = disk;
@@ -1108,10 +1109,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			 */
 			disk_put_part(bdev->bd_part);
 			bdev->bd_part = NULL;
-			module_put(disk->fops->owner);
-			put_disk(disk);
 			bdev->bd_disk = NULL;
 			mutex_unlock(&bdev->bd_mutex);
+			disk_unblock_events(disk);
+			module_put(disk->fops->owner);
+			put_disk(disk);
 			goto restart;
 		}
 		if (ret)
@@ -1148,9 +1150,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 				bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
 		}
 	} else {
-		module_put(disk->fops->owner);
-		put_disk(disk);
-		disk = NULL;
 		if (bdev->bd_contains == bdev) {
 			if (bdev->bd_disk->fops->open) {
 				ret = bdev->bd_disk->fops->open(bdev, mode);
@@ -1160,11 +1159,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			if (bdev->bd_invalidated)
 				rescan_partitions(bdev->bd_disk, bdev);
 		}
+		/* only one opener holds refs to the module and disk */
+		module_put(disk->fops->owner);
+		put_disk(disk);
 	}
 	bdev->bd_openers++;
 	if (for_part)
 		bdev->bd_part_count++;
 	mutex_unlock(&bdev->bd_mutex);
+	disk_unblock_events(disk);
 	return 0;
 
  out_clear:
@@ -1177,10 +1180,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 	bdev->bd_contains = NULL;
  out_unlock_bdev:
 	mutex_unlock(&bdev->bd_mutex);
- out:
-	if (disk)
-		module_put(disk->fops->owner);
+	disk_unblock_events(disk);
+	module_put(disk->fops->owner);
 	put_disk(disk);
+ out:
 	bdput(bdev);
 
 	return ret;
@@ -1446,14 +1449,13 @@ int blkdev_put(struct block_device *bdev, fmode_t mode)
 		if (bdev_free) {
 			if (bdev->bd_write_holder) {
 				disk_unblock_events(bdev->bd_disk);
-				bdev->bd_write_holder = false;
-			} else
 				disk_check_events(bdev->bd_disk);
+				bdev->bd_write_holder = false;
+			}
 		}
 
 		mutex_unlock(&bdev->bd_mutex);
-	} else
-		disk_check_events(bdev->bd_disk);
+	}
 
 	return __blkdev_put(bdev, mode, 0);
 }
@@ -1527,7 +1529,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 static const struct address_space_operations def_blk_aops = {
 	.readpage = blkdev_readpage,
 	.writepage = blkdev_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = blkdev_write_begin,
 	.write_end = blkdev_write_end,
 	.writepages = generic_writepages,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 100b07f021b..830d261d0e6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
 	.writepages = btree_writepages,
 	.releasepage = btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
-	.sync_page = block_sync_page,
 #ifdef CONFIG_MIGRATION
 	.migratepage = btree_migratepage,
 #endif
@@ -1331,82 +1330,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 }
 
 /*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-	struct btrfs_device *device;
-	struct btrfs_fs_info *info;
-
-	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
-	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
-		if (!device->bdev)
-			continue;
-
-		bdi = blk_get_backing_dev_info(device->bdev);
-		if (bdi->unplug_io_fn)
-			bdi->unplug_io_fn(bdi, page);
-	}
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-	struct inode *inode;
-	struct extent_map_tree *em_tree;
-	struct extent_map *em;
-	struct address_space *mapping;
-	u64 offset;
-
-	/* the generic O_DIRECT read code does this */
-	if (1 || !page) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	/*
-	 * page->mapping may change at any time.  Get a consistent copy
-	 * and use that for everything below
-	 */
-	smp_mb();
-	mapping = page->mapping;
-	if (!mapping)
-		return;
-
-	inode = mapping->host;
-
-	/*
-	 * don't do the expensive searching for a small number of
-	 * devices
-	 */
-	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	offset = page_offset(page);
-
-	em_tree = &BTRFS_I(inode)->extent_tree;
-	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-	read_unlock(&em_tree->lock);
-	if (!em) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-		free_extent_map(em);
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-	offset = offset - em->start;
-	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
-			  em->block_start + offset, page);
-	free_extent_map(em);
-}
-
-/*
  * If this fails, caller must call bdi_destroy() to get rid of the
  * bdi again.
  */
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 		return err;
 
 	bdi->ra_pages = default_backing_dev_info.ra_pages;
-	bdi->unplug_io_fn = btrfs_unplug_io_fn;
-	bdi->unplug_io_data = info;
 	bdi->congested_fn = btrfs_congested_fn;
 	bdi->congested_data = info;
 	return 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 714adc4ac4c..b5b92824a27 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2188,7 +2188,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned long nr_written = 0;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = WRITE_SYNC_PLUG;
+		write_flags = WRITE_SYNC;
 	else
 		write_flags = WRITE;
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 512c3d1da08..119520bdb9a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7340,7 +7340,6 @@ static const struct address_space_operations btrfs_aops = {
 	.writepage = btrfs_writepage,
 	.writepages = btrfs_writepages,
 	.readpages = btrfs_readpages,
-	.sync_page = block_sync_page,
 	.direct_IO = btrfs_direct_IO,
 	.invalidatepage = btrfs_invalidatepage,
 	.releasepage = btrfs_releasepage,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd13eb81ee4..9d554e8e658 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run;
-	unsigned long num_sync_run;
 	unsigned long batch_run = 0;
 	unsigned long limit;
 	unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
-	/* we want to make sure that every time we switch from the sync
-	 * list to the normal list, we unplug
-	 */
-	num_sync_run = 0;
-
 loop:
 	spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ loop_lock:
 
 	spin_unlock(&device->io_lock);
 
-	/*
-	 * if we're doing the regular priority list, make sure we unplug
-	 * for any high prio bios we've sent down
-	 */
-	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-
 	while (pending) {
 
 		rmb();
@@ -259,19 +244,11 @@ loop_lock:
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-		if (cur->bi_rw & REQ_SYNC)
-			num_sync_run++;
-
 		submit_bio(cur->bi_rw, cur);
 		num_run++;
 		batch_run++;
-		if (need_resched()) {
-			if (num_sync_run) {
-				blk_run_backing_dev(bdi, NULL);
-				num_sync_run = 0;
-			}
+		if (need_resched())
 			cond_resched();
-		}
 
 		/*
 		 * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
 			 * against it before looping
 			 */
 			last_waited = ioc->last_waited;
-			if (need_resched()) {
-				if (num_sync_run) {
-					blk_run_backing_dev(bdi, NULL);
-					num_sync_run = 0;
-				}
+			if (need_resched())
 				cond_resched();
-			}
 			continue;
 		}
 		spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
 		}
 	}
 
-	if (num_sync_run) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-	/*
-	 * IO has already been through a long path to get here.  Checksumming,
-	 * async helper threads, perhaps compression.  We've done a pretty
-	 * good job of collecting a batch of IO and should just unplug
-	 * the device right away.
-	 *
-	 * This will help anyone who is waiting on the IO, they might have
-	 * already unplugged, but managed to do so before the bio they
-	 * cared about found its way down here.
-	 */
-	blk_run_backing_dev(bdi, NULL);
-
 	cond_resched();
 	if (again)
 		goto loop;
@@ -2955,7 +2911,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
 			     struct btrfs_multi_bio **multi_ret,
-			     int mirror_num, struct page *unplug_page)
+			     int mirror_num)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -2987,11 +2943,6 @@ again:
 	em = lookup_extent_mapping(em_tree, logical, *length);
 	read_unlock(&em_tree->lock);
 
-	if (!em && unplug_page) {
-		kfree(multi);
-		return 0;
-	}
-
 	if (!em) {
 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
 		       (unsigned long long)logical,
@@ -3047,13 +2998,13 @@ again:
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret && !unplug_page)
+	if (!multi_ret)
 		goto out;
 
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -3075,7 +3026,7 @@ again:
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
@@ -3095,22 +3046,10 @@ again:
 	BUG_ON(stripe_index >= map->num_stripes);
 
 	for (i = 0; i < num_stripes; i++) {
-		if (unplug_page) {
-			struct btrfs_device *device;
-			struct backing_dev_info *bdi;
-
-			device = map->stripes[stripe_index].dev;
-			if (device->bdev) {
-				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn)
-					bdi->unplug_io_fn(bdi, unplug_page);
-			}
-		} else {
-			multi->stripes[i].physical =
-				map->stripes[stripe_index].physical +
-				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
-		}
+		multi->stripes[i].physical =
+			map->stripes[stripe_index].physical +
+			stripe_offset + stripe_nr * map->stripe_len;
+		multi->stripes[i].dev = map->stripes[stripe_index].dev;
 		stripe_index++;
 	}
 	if (multi_ret) {
@@ -3128,7 +3067,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		      struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
 	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-				 mirror_num, NULL);
+				 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3135,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-		      u64 logical, struct page *page)
-{
-	u64 length = PAGE_CACHE_SIZE;
-	return __btrfs_map_block(map_tree, READ, logical, &length,
-				 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
 	struct btrfs_multi_bio *multi = bio->bi_private;
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76e2ca..2e6b1a387b7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
+	struct blk_plug plug;
 
 	INIT_LIST_HEAD(&tmp);
+	blk_start_plug(&plug);
 
 	spin_lock(lock);
 	while (!list_empty(list)) {
@@ -775,7 +769,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+				write_dirty_buffer(bh, WRITE_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -783,16 +777,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
 		}
 	}
 
+	spin_unlock(lock);
+	blk_finish_plug(&plug);
+	spin_lock(lock);
+
 	while (!list_empty(&tmp)) {
 		bh = BH_ENTRY(tmp.prev);
 		get_bh(bh);
@@ -1614,14 +1608,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1622,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-			WRITE_SYNC_PLUG : WRITE);
+			WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e964b1cd5dd..c27d236738f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
 	return rc;
 }
 
-/* static void cifs_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-	struct inode *inode;
-	unsigned long index = page->index;
-	unsigned int rpages = 0;
-	int rc = 0;
-
-	cFYI(1, "sync page %p", page);
-	mapping = page->mapping;
-	if (!mapping)
-		return 0;
-	inode = mapping->host;
-	if (!inode)
-		return; */
-
-/*	fill in rpages then
-	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/*	cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
-	if (rc < 0)
-		return rc;
-	return 0;
-#endif
-} */
-
 /*
  * As file closes, flush all cached write data for this inode checking
  * for write behind errors.
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.releasepage = cifs_release_page,
 	.invalidatepage = cifs_invalidate_page,
-	/* .sync_page = cifs_sync_page, */
 	/* .direct_IO = */
 };
 
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.releasepage = cifs_release_page,
 	.invalidatepage = cifs_invalidate_page,
-	/* .sync_page = cifs_sync_page, */
 	/* .direct_IO = */
 };
diff --git a/fs/direct-io.c b/fs/direct-io.c
index dcb5577cde1..ac5f164170e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1110 ((rw & READ) || (dio->result == dio->size))) 1110 ((rw & READ) || (dio->result == dio->size)))
1111 ret = -EIOCBQUEUED; 1111 ret = -EIOCBQUEUED;
1112 1112
1113 if (ret != -EIOCBQUEUED) { 1113 if (ret != -EIOCBQUEUED)
1114 /* All IO is now issued, send it on its way */
1115 blk_run_address_space(inode->i_mapping);
1116 dio_await_completion(dio); 1114 dio_await_completion(dio);
1117 }
1118 1115
1119 /* 1116 /*
1120 * Sync will always be dropping the final ref and completing the 1117 * Sync will always be dropping the final ref and completing the
@@ -1176,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1176 struct dio *dio; 1173 struct dio *dio;
1177 1174
1178 if (rw & WRITE) 1175 if (rw & WRITE)
1179 rw = WRITE_ODIRECT_PLUG; 1176 rw = WRITE_ODIRECT;
1180 1177
1181 if (bdev) 1178 if (bdev)
1182 bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); 1179 bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index a8e7797b947..9c13412e6c9 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
 }
 static const struct address_space_operations efs_aops = {
 	.readpage = efs_readpage,
-	.sync_page = block_sync_page,
 	.bmap = _efs_bmap
 };
 
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 0c713cfbebf..8472c098445 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -823,7 +823,6 @@ const struct address_space_operations exofs_aops = {
 	.direct_IO = NULL, /* TODO: Should be trivial to do */
 
 	/* With these NULL has special meaning or default is not exported */
-	.sync_page = NULL,
 	.get_xip_mem = NULL,
 	.migratepage = NULL,
 	.launder_page = NULL,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 40ad210a504..c47f706878b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
 	.readpage = ext2_readpage,
 	.readpages = ext2_readpages,
 	.writepage = ext2_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext2_write_begin,
 	.write_end = ext2_write_end,
 	.bmap = ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
 	.readpage = ext2_readpage,
 	.readpages = ext2_readpages,
 	.writepage = ext2_nobh_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext2_nobh_write_begin,
 	.write_end = nobh_write_end,
 	.bmap = ext2_bmap,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ae94f6d949f..fe2541d250e 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
 	.readpage = ext3_readpage,
 	.readpages = ext3_readpages,
 	.writepage = ext3_ordered_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext3_write_begin,
 	.write_end = ext3_ordered_write_end,
 	.bmap = ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
 	.readpage = ext3_readpage,
 	.readpages = ext3_readpages,
 	.writepage = ext3_writeback_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext3_write_begin,
 	.write_end = ext3_writeback_write_end,
 	.bmap = ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
 	.readpage = ext3_readpage,
 	.readpages = ext3_readpages,
 	.writepage = ext3_journalled_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext3_write_begin,
 	.write_end = ext3_journalled_write_end,
 	.set_page_dirty = ext3_journalled_set_page_dirty,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9f7f9e49914..9297ad46c46 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = {
 	.readpage = ext4_readpage,
 	.readpages = ext4_readpages,
 	.writepage = ext4_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext4_write_begin,
 	.write_end = ext4_ordered_write_end,
 	.bmap = ext4_bmap,
@@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = {
 	.readpage = ext4_readpage,
 	.readpages = ext4_readpages,
 	.writepage = ext4_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext4_write_begin,
 	.write_end = ext4_writeback_write_end,
 	.bmap = ext4_bmap,
@@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = {
 	.readpage = ext4_readpage,
 	.readpages = ext4_readpages,
 	.writepage = ext4_writepage,
-	.sync_page = block_sync_page,
 	.write_begin = ext4_write_begin,
 	.write_end = ext4_journalled_write_end,
 	.set_page_dirty = ext4_journalled_set_page_dirty,
@@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = {
 	.readpages = ext4_readpages,
 	.writepage = ext4_writepage,
 	.writepages = ext4_da_writepages,
-	.sync_page = block_sync_page,
 	.write_begin = ext4_da_write_begin,
 	.write_end = ext4_da_write_end,
 	.bmap = ext4_bmap,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 955cc309142..e2cd90e4bb7 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -310,8 +310,7 @@ static int io_submit_init(struct ext4_io_submit *io,
 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 
 	io->io_bio = bio;
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
-		     WRITE_SYNC_PLUG : WRITE);
+	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 	io->io_next_block = bh->b_blocknr;
 	return 0;
 }
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0e277ec4b61..8d68690bdcf 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
 	.readpages = fat_readpages,
 	.writepage = fat_writepage,
 	.writepages = fat_writepages,
-	.sync_page = block_sync_page,
 	.write_begin = fat_write_begin,
 	.write_end = fat_write_end,
 	.direct_IO = fat_direct_IO,
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 1429f3ae1e8..5d318c44f85 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -44,7 +44,6 @@ static sector_t vxfs_bmap(struct address_space *, sector_t);
 const struct address_space_operations vxfs_aops = {
 	.readpage = vxfs_readpage,
 	.bmap = vxfs_bmap,
-	.sync_page = block_sync_page,
 };
 
 inline void
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 051b1a08452..cc6ec4b2f0f 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -870,7 +870,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
 
 	fc->bdi.name = "fuse";
 	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	fc->bdi.unplug_io_fn = default_unplug_io_fn;
 	/* fuse does it's own writeback accounting */
 	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aad77e4f61b..c71995b111b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1117,7 +1117,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
 	.writepages = gfs2_writeback_writepages,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
-	.sync_page = block_sync_page,
 	.write_begin = gfs2_write_begin,
 	.write_end = gfs2_write_end,
 	.bmap = gfs2_bmap,
@@ -1133,7 +1132,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
 	.writepage = gfs2_ordered_writepage,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
-	.sync_page = block_sync_page,
 	.write_begin = gfs2_write_begin,
 	.write_end = gfs2_write_end,
 	.set_page_dirty = gfs2_set_page_dirty,
@@ -1151,7 +1149,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
 	.writepages = gfs2_jdata_writepages,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
-	.sync_page = block_sync_page,
 	.write_begin = gfs2_write_begin,
 	.write_end = gfs2_write_end,
 	.set_page_dirty = gfs2_set_page_dirty,
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e7ed31f858d..5b102c1887f 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -121,7 +121,7 @@ __acquires(&sdp->sd_ail_lock)
 			lock_buffer(bh);
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
-				submit_bh(WRITE_SYNC_PLUG, bh);
+				submit_bh(WRITE_SYNC, bh);
 			} else {
 				unlock_buffer(bh);
 				brelse(bh);
@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 		lock_buffer(bh);
 		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
 			bh->b_end_io = end_buffer_write_sync;
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 		} else {
 			unlock_buffer(bh);
 			brelse(bh);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index e919abf25ec..51d27f00ebb 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -204,7 +204,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 	}
 
 	gfs2_log_unlock(sdp);
-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 	gfs2_log_lock(sdp);
 
 	n = 0;
@@ -214,7 +214,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
 		gfs2_log_unlock(sdp);
 		lock_buffer(bd2->bd_bh);
 		bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
-		submit_bh(WRITE_SYNC_PLUG, bh);
+		submit_bh(WRITE_SYNC, bh);
 		gfs2_log_lock(sdp);
 		if (++n >= num)
 			break;
@@ -356,7 +356,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 
 			bh = gfs2_log_get_buf(sdp);
 			mh = (struct gfs2_meta_header *)bh->b_data;
@@ -373,7 +373,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 	}
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 
-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 }
 
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -575,7 +575,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	ptr = bh_log_ptr(bh);
 
 	get_bh(bh);
-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 	gfs2_log_lock(sdp);
 	while(!list_empty(list)) {
 		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@@ -601,7 +601,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
 		} else {
 			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
 		}
-		submit_bh(WRITE_SYNC_PLUG, bh1);
+		submit_bh(WRITE_SYNC, bh1);
 		gfs2_log_lock(sdp);
 		ptr += 2;
 	}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 01d97f48655..675349b5a13 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
 	int write_op = REQ_META |
-		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
+		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 const struct address_space_operations gfs2_meta_aops = {
 	.writepage = gfs2_aspace_writepage,
 	.releasepage = gfs2_releasepage,
-	.sync_page = block_sync_page,
 };
 
 /**
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index dffb4e99664..fff16c968e6 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
150const struct address_space_operations hfs_btree_aops = { 150const struct address_space_operations hfs_btree_aops = {
151 .readpage = hfs_readpage, 151 .readpage = hfs_readpage,
152 .writepage = hfs_writepage, 152 .writepage = hfs_writepage,
153 .sync_page = block_sync_page,
154 .write_begin = hfs_write_begin, 153 .write_begin = hfs_write_begin,
155 .write_end = generic_write_end, 154 .write_end = generic_write_end,
156 .bmap = hfs_bmap, 155 .bmap = hfs_bmap,
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
160const struct address_space_operations hfs_aops = { 159const struct address_space_operations hfs_aops = {
161 .readpage = hfs_readpage, 160 .readpage = hfs_readpage,
162 .writepage = hfs_writepage, 161 .writepage = hfs_writepage,
163 .sync_page = block_sync_page,
164 .write_begin = hfs_write_begin, 162 .write_begin = hfs_write_begin,
165 .write_end = generic_write_end, 163 .write_end = generic_write_end,
166 .bmap = hfs_bmap, 164 .bmap = hfs_bmap,
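
With the block_sync_page helper going away as part of this series, the .sync_page entry is dropped from the address_space_operations tables. Reassembled from the new side of the hunk, hfs_aops becomes the following (members below .bmap are outside the hunk and unchanged):

	const struct address_space_operations hfs_aops = {
		.readpage	= hfs_readpage,
		.writepage	= hfs_writepage,
		.write_begin	= hfs_write_begin,
		.write_end	= generic_write_end,
		.bmap		= hfs_bmap,
		/* remaining members not shown in this hunk */
	};

The same one-line deletion repeats for hfs_btree_aops above and for most of the simpler filesystems that follow (hfsplus, hpfs, isofs, jfs, minix, omfs, qnx4, reiserfs, sysv, udf, ufs).
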
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index a8df651747f..b248a6cfcad 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
146const struct address_space_operations hfsplus_btree_aops = { 146const struct address_space_operations hfsplus_btree_aops = {
147 .readpage = hfsplus_readpage, 147 .readpage = hfsplus_readpage,
148 .writepage = hfsplus_writepage, 148 .writepage = hfsplus_writepage,
149 .sync_page = block_sync_page,
150 .write_begin = hfsplus_write_begin, 149 .write_begin = hfsplus_write_begin,
151 .write_end = generic_write_end, 150 .write_end = generic_write_end,
152 .bmap = hfsplus_bmap, 151 .bmap = hfsplus_bmap,
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
156const struct address_space_operations hfsplus_aops = { 155const struct address_space_operations hfsplus_aops = {
157 .readpage = hfsplus_readpage, 156 .readpage = hfsplus_readpage,
158 .writepage = hfsplus_writepage, 157 .writepage = hfsplus_writepage,
159 .sync_page = block_sync_page,
160 .write_begin = hfsplus_write_begin, 158 .write_begin = hfsplus_write_begin,
161 .write_end = generic_write_end, 159 .write_end = generic_write_end,
162 .bmap = hfsplus_bmap, 160 .bmap = hfsplus_bmap,
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 2dbae20450f..9b9eb6933e4 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -119,7 +119,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
119const struct address_space_operations hpfs_aops = { 119const struct address_space_operations hpfs_aops = {
120 .readpage = hpfs_readpage, 120 .readpage = hpfs_readpage,
121 .writepage = hpfs_writepage, 121 .writepage = hpfs_writepage,
122 .sync_page = block_sync_page,
123 .write_begin = hpfs_write_begin, 122 .write_begin = hpfs_write_begin,
124 .write_end = generic_write_end, 123 .write_end = generic_write_end,
125 .bmap = _hpfs_bmap 124 .bmap = _hpfs_bmap
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index a0f3833c0db..3db5ba4568f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
1158 1158
1159static const struct address_space_operations isofs_aops = { 1159static const struct address_space_operations isofs_aops = {
1160 .readpage = isofs_readpage, 1160 .readpage = isofs_readpage,
1161 .sync_page = block_sync_page,
1162 .bmap = _isofs_bmap 1161 .bmap = _isofs_bmap
1163}; 1162};
1164 1163
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 34a4861c14b..da871ee084d 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -20,6 +20,7 @@
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/pagemap.h> 21#include <linux/pagemap.h>
22#include <linux/bio.h> 22#include <linux/bio.h>
23#include <linux/blkdev.h>
23 24
24/* 25/*
25 * Default IO end handler for temporary BJ_IO buffer_heads. 26 * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -294,7 +295,7 @@ void journal_commit_transaction(journal_t *journal)
294 int first_tag = 0; 295 int first_tag = 0;
295 int tag_flag; 296 int tag_flag;
296 int i; 297 int i;
297 int write_op = WRITE_SYNC; 298 struct blk_plug plug;
298 299
299 /* 300 /*
300 * First job: lock down the current transaction and wait for 301 * First job: lock down the current transaction and wait for
@@ -327,13 +328,6 @@ void journal_commit_transaction(journal_t *journal)
327 spin_lock(&journal->j_state_lock); 328 spin_lock(&journal->j_state_lock);
328 commit_transaction->t_state = T_LOCKED; 329 commit_transaction->t_state = T_LOCKED;
329 330
330 /*
331 * Use plugged writes here, since we want to submit several before
332 * we unplug the device. We don't do explicit unplugging in here,
333 * instead we rely on sync_buffer() doing the unplug for us.
334 */
335 if (commit_transaction->t_synchronous_commit)
336 write_op = WRITE_SYNC_PLUG;
337 spin_lock(&commit_transaction->t_handle_lock); 331 spin_lock(&commit_transaction->t_handle_lock);
338 while (commit_transaction->t_updates) { 332 while (commit_transaction->t_updates) {
339 DEFINE_WAIT(wait); 333 DEFINE_WAIT(wait);
@@ -418,8 +412,10 @@ void journal_commit_transaction(journal_t *journal)
418 * Now start flushing things to disk, in the order they appear 412 * Now start flushing things to disk, in the order they appear
419 * on the transaction lists. Data blocks go first. 413 * on the transaction lists. Data blocks go first.
420 */ 414 */
415 blk_start_plug(&plug);
421 err = journal_submit_data_buffers(journal, commit_transaction, 416 err = journal_submit_data_buffers(journal, commit_transaction,
422 write_op); 417 WRITE_SYNC);
418 blk_finish_plug(&plug);
423 419
424 /* 420 /*
425 * Wait for all previously submitted IO to complete. 421 * Wait for all previously submitted IO to complete.
@@ -480,7 +476,9 @@ void journal_commit_transaction(journal_t *journal)
480 err = 0; 476 err = 0;
481 } 477 }
482 478
483 journal_write_revoke_records(journal, commit_transaction, write_op); 479 blk_start_plug(&plug);
480
481 journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
484 482
485 /* 483 /*
486 * If we found any dirty or locked buffers, then we should have 484 * If we found any dirty or locked buffers, then we should have
@@ -650,7 +648,7 @@ start_journal_io:
650 clear_buffer_dirty(bh); 648 clear_buffer_dirty(bh);
651 set_buffer_uptodate(bh); 649 set_buffer_uptodate(bh);
652 bh->b_end_io = journal_end_buffer_io_sync; 650 bh->b_end_io = journal_end_buffer_io_sync;
653 submit_bh(write_op, bh); 651 submit_bh(WRITE_SYNC, bh);
654 } 652 }
655 cond_resched(); 653 cond_resched();
656 654
@@ -661,6 +659,8 @@ start_journal_io:
661 } 659 }
662 } 660 }
663 661
662 blk_finish_plug(&plug);
663
664 /* Lo and behold: we have just managed to send a transaction to 664 /* Lo and behold: we have just managed to send a transaction to
665 the log. Before we can commit it, wait for the IO so far to 665 the log. Before we can commit it, wait for the IO so far to
666 complete. Control buffers being written are on the 666 complete. Control buffers being written are on the
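
jbd switches from the removed per-queue plugging to an explicit on-stack plug. Reconstructed from the new side of the hunks above, the commit path now brackets its batched submissions like this (code between the calls is unchanged and elided):

	struct blk_plug plug;

	/* phase 1: data buffers */
	blk_start_plug(&plug);
	err = journal_submit_data_buffers(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	/* ... wait for the data IO ... */

	/* phase 2: revoke records and journal metadata */
	blk_start_plug(&plug);
	journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
	/* descriptor/metadata loop, each buffer via submit_bh(WRITE_SYNC, bh) */
	blk_finish_plug(&plug);

Since WRITE_SYNC_PLUG is gone, the old t_synchronous_commit special case that upgraded write_op is no longer needed; everything is submitted as WRITE_SYNC and the batching comes from the plug.
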
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f3ad1598b20..fa36d7662b2 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal,
137 if (journal->j_flags & JBD2_BARRIER && 137 if (journal->j_flags & JBD2_BARRIER &&
138 !JBD2_HAS_INCOMPAT_FEATURE(journal, 138 !JBD2_HAS_INCOMPAT_FEATURE(journal,
139 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) 139 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
140 ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh); 140 ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
141 else 141 else
142 ret = submit_bh(WRITE_SYNC_PLUG, bh); 142 ret = submit_bh(WRITE_SYNC, bh);
143 143
144 *cbh = bh; 144 *cbh = bh;
145 return ret; 145 return ret;
@@ -329,7 +329,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
329 int tag_bytes = journal_tag_bytes(journal); 329 int tag_bytes = journal_tag_bytes(journal);
330 struct buffer_head *cbh = NULL; /* For transactional checksums */ 330 struct buffer_head *cbh = NULL; /* For transactional checksums */
331 __u32 crc32_sum = ~0; 331 __u32 crc32_sum = ~0;
332 int write_op = WRITE_SYNC; 332 struct blk_plug plug;
333 333
334 /* 334 /*
335 * First job: lock down the current transaction and wait for 335 * First job: lock down the current transaction and wait for
@@ -363,13 +363,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
363 write_lock(&journal->j_state_lock); 363 write_lock(&journal->j_state_lock);
364 commit_transaction->t_state = T_LOCKED; 364 commit_transaction->t_state = T_LOCKED;
365 365
366 /*
367 * Use plugged writes here, since we want to submit several before
368 * we unplug the device. We don't do explicit unplugging in here,
369 * instead we rely on sync_buffer() doing the unplug for us.
370 */
371 if (commit_transaction->t_synchronous_commit)
372 write_op = WRITE_SYNC_PLUG;
373 trace_jbd2_commit_locking(journal, commit_transaction); 366 trace_jbd2_commit_locking(journal, commit_transaction);
374 stats.run.rs_wait = commit_transaction->t_max_wait; 367 stats.run.rs_wait = commit_transaction->t_max_wait;
375 stats.run.rs_locked = jiffies; 368 stats.run.rs_locked = jiffies;
@@ -469,8 +462,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
469 if (err) 462 if (err)
470 jbd2_journal_abort(journal, err); 463 jbd2_journal_abort(journal, err);
471 464
465 blk_start_plug(&plug);
472 jbd2_journal_write_revoke_records(journal, commit_transaction, 466 jbd2_journal_write_revoke_records(journal, commit_transaction,
473 write_op); 467 WRITE_SYNC);
468 blk_finish_plug(&plug);
474 469
475 jbd_debug(3, "JBD: commit phase 2\n"); 470 jbd_debug(3, "JBD: commit phase 2\n");
476 471
@@ -497,6 +492,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
497 err = 0; 492 err = 0;
498 descriptor = NULL; 493 descriptor = NULL;
499 bufs = 0; 494 bufs = 0;
495 blk_start_plug(&plug);
500 while (commit_transaction->t_buffers) { 496 while (commit_transaction->t_buffers) {
501 497
502 /* Find the next buffer to be journaled... */ 498 /* Find the next buffer to be journaled... */
@@ -658,7 +654,7 @@ start_journal_io:
658 clear_buffer_dirty(bh); 654 clear_buffer_dirty(bh);
659 set_buffer_uptodate(bh); 655 set_buffer_uptodate(bh);
660 bh->b_end_io = journal_end_buffer_io_sync; 656 bh->b_end_io = journal_end_buffer_io_sync;
661 submit_bh(write_op, bh); 657 submit_bh(WRITE_SYNC, bh);
662 } 658 }
663 cond_resched(); 659 cond_resched();
664 stats.run.rs_blocks_logged += bufs; 660 stats.run.rs_blocks_logged += bufs;
@@ -699,6 +695,8 @@ start_journal_io:
699 __jbd2_journal_abort_hard(journal); 695 __jbd2_journal_abort_hard(journal);
700 } 696 }
701 697
698 blk_finish_plug(&plug);
699
702 /* Lo and behold: we have just managed to send a transaction to 700 /* Lo and behold: we have just managed to send a transaction to
703 the log. Before we can commit it, wait for the IO so far to 701 the log. Before we can commit it, wait for the IO so far to
704 complete. Control buffers being written are on the 702 complete. Control buffers being written are on the
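
jbd2 gets the same treatment, and the commit record itself loses the _PLUG variant. From the new side of the first hunk:

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

As in jbd, one plug wraps jbd2_journal_write_revoke_records() and a second one covers the t_buffers loop, finished right before the commit code waits for the IO it has queued.
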
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9978803ceed..eddbb373209 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
352 .readpages = jfs_readpages, 352 .readpages = jfs_readpages,
353 .writepage = jfs_writepage, 353 .writepage = jfs_writepage,
354 .writepages = jfs_writepages, 354 .writepages = jfs_writepages,
355 .sync_page = block_sync_page,
356 .write_begin = jfs_write_begin, 355 .write_begin = jfs_write_begin,
357 .write_end = nobh_write_end, 356 .write_end = nobh_write_end,
358 .bmap = jfs_bmap, 357 .bmap = jfs_bmap,
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 48b44bd8267..6740d34cd82 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
583const struct address_space_operations jfs_metapage_aops = { 583const struct address_space_operations jfs_metapage_aops = {
584 .readpage = metapage_readpage, 584 .readpage = metapage_readpage,
585 .writepage = metapage_writepage, 585 .writepage = metapage_writepage,
586 .sync_page = block_sync_page,
587 .releasepage = metapage_releasepage, 586 .releasepage = metapage_releasepage,
588 .invalidatepage = metapage_invalidatepage, 587 .invalidatepage = metapage_invalidatepage,
589 .set_page_dirty = __set_page_dirty_nobuffers, 588 .set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 723bc5bca09..1adc8d455f0 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
39 bio.bi_end_io = request_complete; 39 bio.bi_end_io = request_complete;
40 40
41 submit_bio(rw, &bio); 41 submit_bio(rw, &bio);
42 generic_unplug_device(bdev_get_queue(bdev));
43 wait_for_completion(&complete); 42 wait_for_completion(&complete);
44 return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO; 43 return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
45} 44}
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
168 } 167 }
169 len = PAGE_ALIGN(len); 168 len = PAGE_ALIGN(len);
170 __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); 169 __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
171 generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
172} 170}
173 171
174 172
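
logfs simply drops its explicit unplug calls: they only made sense under the old implicit plugging, and a bio submitted outside a plug is dispatched directly, while anything still plugged is flushed when the submitter sleeps. From the new side of the first hunk, the tail of sync_request() is just:

	submit_bio(rw, &bio);
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
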
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index ae0b83f476a..adcdc0a4e18 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
399static const struct address_space_operations minix_aops = { 399static const struct address_space_operations minix_aops = {
400 .readpage = minix_readpage, 400 .readpage = minix_readpage,
401 .writepage = minix_writepage, 401 .writepage = minix_writepage,
402 .sync_page = block_sync_page,
403 .write_begin = minix_write_begin, 402 .write_begin = minix_write_begin,
404 .write_end = generic_write_end, 403 .write_end = generic_write_end,
405 .bmap = minix_bmap 404 .bmap = minix_bmap
diff --git a/fs/mpage.c b/fs/mpage.c
index d78455a81ec..0afc809e46e 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -364,6 +364,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
364 sector_t last_block_in_bio = 0; 364 sector_t last_block_in_bio = 0;
365 struct buffer_head map_bh; 365 struct buffer_head map_bh;
366 unsigned long first_logical_block = 0; 366 unsigned long first_logical_block = 0;
367 struct blk_plug plug;
368
369 blk_start_plug(&plug);
367 370
368 map_bh.b_state = 0; 371 map_bh.b_state = 0;
369 map_bh.b_size = 0; 372 map_bh.b_size = 0;
@@ -385,6 +388,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
385 BUG_ON(!list_empty(pages)); 388 BUG_ON(!list_empty(pages));
386 if (bio) 389 if (bio)
387 mpage_bio_submit(READ, bio); 390 mpage_bio_submit(READ, bio);
391 blk_finish_plug(&plug);
388 return 0; 392 return 0;
389} 393}
390EXPORT_SYMBOL(mpage_readpages); 394EXPORT_SYMBOL(mpage_readpages);
@@ -666,8 +670,11 @@ int
666mpage_writepages(struct address_space *mapping, 670mpage_writepages(struct address_space *mapping,
667 struct writeback_control *wbc, get_block_t get_block) 671 struct writeback_control *wbc, get_block_t get_block)
668{ 672{
673 struct blk_plug plug;
669 int ret; 674 int ret;
670 675
676 blk_start_plug(&plug);
677
671 if (!get_block) 678 if (!get_block)
672 ret = generic_writepages(mapping, wbc); 679 ret = generic_writepages(mapping, wbc);
673 else { 680 else {
@@ -682,6 +689,7 @@ mpage_writepages(struct address_space *mapping,
682 if (mpd.bio) 689 if (mpd.bio)
683 mpage_bio_submit(WRITE, mpd.bio); 690 mpage_bio_submit(WRITE, mpd.bio);
684 } 691 }
692 blk_finish_plug(&plug);
685 return ret; 693 return ret;
686} 694}
687EXPORT_SYMBOL(mpage_writepages); 695EXPORT_SYMBOL(mpage_writepages);
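
mpage now plugs around a whole read or write pass instead of relying on the queue to batch for it. Reconstructed from the new side of the hunks, mpage_writepages() becomes the following (the mpage_data setup in the else branch is unchanged and elided):

	int
	mpage_writepages(struct address_space *mapping,
			struct writeback_control *wbc, get_block_t get_block)
	{
		struct blk_plug plug;
		int ret;

		blk_start_plug(&plug);

		if (!get_block)
			ret = generic_writepages(mapping, wbc);
		else {
			/* ... unchanged ... */
			if (mpd.bio)
				mpage_bio_submit(WRITE, mpd.bio);
		}
		blk_finish_plug(&plug);
		return ret;
	}

mpage_readpages() is bracketed the same way around its readpage loop.
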
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 85f7baa15f5..609cd223eea 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -34,15 +34,10 @@
34#include "page.h" 34#include "page.h"
35#include "btnode.h" 35#include "btnode.h"
36 36
37
38static const struct address_space_operations def_btnode_aops = {
39 .sync_page = block_sync_page,
40};
41
42void nilfs_btnode_cache_init(struct address_space *btnc, 37void nilfs_btnode_cache_init(struct address_space *btnc,
43 struct backing_dev_info *bdi) 38 struct backing_dev_info *bdi)
44{ 39{
45 nilfs_mapping_init(btnc, bdi, &def_btnode_aops); 40 nilfs_mapping_init(btnc, bdi);
46} 41}
47 42
48void nilfs_btnode_cache_clear(struct address_space *btnc) 43void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index caf9a6a3fb5..1c2a3e23f8b 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -49,7 +49,6 @@
49#include "ifile.h" 49#include "ifile.h"
50 50
51static const struct address_space_operations def_gcinode_aops = { 51static const struct address_space_operations def_gcinode_aops = {
52 .sync_page = block_sync_page,
53}; 52};
54 53
55/* 54/*
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index d5625be236a..c0aa27490c0 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -280,7 +280,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
280const struct address_space_operations nilfs_aops = { 280const struct address_space_operations nilfs_aops = {
281 .writepage = nilfs_writepage, 281 .writepage = nilfs_writepage,
282 .readpage = nilfs_readpage, 282 .readpage = nilfs_readpage,
283 .sync_page = block_sync_page,
284 .writepages = nilfs_writepages, 283 .writepages = nilfs_writepages,
285 .set_page_dirty = nilfs_set_page_dirty, 284 .set_page_dirty = nilfs_set_page_dirty,
286 .readpages = nilfs_readpages, 285 .readpages = nilfs_readpages,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index a0babd2bff6..a649b05f706 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
399 399
400static const struct address_space_operations def_mdt_aops = { 400static const struct address_space_operations def_mdt_aops = {
401 .writepage = nilfs_mdt_write_page, 401 .writepage = nilfs_mdt_write_page,
402 .sync_page = block_sync_page,
403}; 402};
404 403
405static const struct inode_operations def_mdt_iops; 404static const struct inode_operations def_mdt_iops;
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
438 mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); 437 mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
439} 438}
440 439
441static const struct address_space_operations shadow_map_aops = {
442 .sync_page = block_sync_page,
443};
444
445/** 440/**
446 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file 441 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
447 * @inode: inode of the metadata file 442 * @inode: inode of the metadata file
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
455 450
456 INIT_LIST_HEAD(&shadow->frozen_buffers); 451 INIT_LIST_HEAD(&shadow->frozen_buffers);
457 address_space_init_once(&shadow->frozen_data); 452 address_space_init_once(&shadow->frozen_data);
458 nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); 453 nilfs_mapping_init(&shadow->frozen_data, bdi);
459 address_space_init_once(&shadow->frozen_btnodes); 454 address_space_init_once(&shadow->frozen_btnodes);
460 nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); 455 nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
461 mi->mi_shadow = shadow; 456 mi->mi_shadow = shadow;
462 return 0; 457 return 0;
463} 458}
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a585b35fd6b..4d2a1ee0eb4 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -493,15 +493,14 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
493} 493}
494 494
495void nilfs_mapping_init(struct address_space *mapping, 495void nilfs_mapping_init(struct address_space *mapping,
496 struct backing_dev_info *bdi, 496 struct backing_dev_info *bdi)
497 const struct address_space_operations *aops)
498{ 497{
499 mapping->host = NULL; 498 mapping->host = NULL;
500 mapping->flags = 0; 499 mapping->flags = 0;
501 mapping_set_gfp_mask(mapping, GFP_NOFS); 500 mapping_set_gfp_mask(mapping, GFP_NOFS);
502 mapping->assoc_mapping = NULL; 501 mapping->assoc_mapping = NULL;
503 mapping->backing_dev_info = bdi; 502 mapping->backing_dev_info = bdi;
504 mapping->a_ops = aops; 503 mapping->a_ops = NULL;
505} 504}
506 505
507/* 506/*
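
With no .sync_page left to install, nilfs2 stops passing aops into nilfs_mapping_init() and its private address_space_operations stubs disappear. The new helper, from the post-patch side above:

	void nilfs_mapping_init(struct address_space *mapping,
				struct backing_dev_info *bdi)
	{
		mapping->host = NULL;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = bdi;
		mapping->a_ops = NULL;
	}

Callers in btnode.c and mdt.c shrink accordingly, e.g. nilfs_mapping_init(btnc, bdi); and nilfs_mapping_init(&shadow->frozen_data, bdi);.
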
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 2a00953ebd5..f06b79ad749 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -62,8 +62,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
62void nilfs_copy_back_pages(struct address_space *, struct address_space *); 62void nilfs_copy_back_pages(struct address_space *, struct address_space *);
63void nilfs_clear_dirty_pages(struct address_space *); 63void nilfs_clear_dirty_pages(struct address_space *);
64void nilfs_mapping_init(struct address_space *mapping, 64void nilfs_mapping_init(struct address_space *mapping,
65 struct backing_dev_info *bdi, 65 struct backing_dev_info *bdi);
66 const struct address_space_operations *aops);
67unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); 66unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
68unsigned long nilfs_find_uncommitted_extent(struct inode *inode, 67unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
69 sector_t start_blk, 68 sector_t start_blk,
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 0f83e93935b..2853ff20f85 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
509 * Last BIO is always sent through the following 509 * Last BIO is always sent through the following
510 * submission. 510 * submission.
511 */ 511 */
512 rw |= REQ_SYNC | REQ_UNPLUG; 512 rw |= REQ_SYNC;
513 res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); 513 res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
514 } 514 }
515 515
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index c3c2c7ac902..0b1e885b8cf 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1543,8 +1543,6 @@ err_out:
1543 */ 1543 */
1544const struct address_space_operations ntfs_aops = { 1544const struct address_space_operations ntfs_aops = {
1545 .readpage = ntfs_readpage, /* Fill page with data. */ 1545 .readpage = ntfs_readpage, /* Fill page with data. */
1546 .sync_page = block_sync_page, /* Currently, just unplugs the
1547 disk request queue. */
1548#ifdef NTFS_RW 1546#ifdef NTFS_RW
1549 .writepage = ntfs_writepage, /* Write dirty page to disk. */ 1547 .writepage = ntfs_writepage, /* Write dirty page to disk. */
1550#endif /* NTFS_RW */ 1548#endif /* NTFS_RW */
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
1560 */ 1558 */
1561const struct address_space_operations ntfs_mst_aops = { 1559const struct address_space_operations ntfs_mst_aops = {
1562 .readpage = ntfs_readpage, /* Fill page with data. */ 1560 .readpage = ntfs_readpage, /* Fill page with data. */
1563 .sync_page = block_sync_page, /* Currently, just unplugs the
1564 disk request queue. */
1565#ifdef NTFS_RW 1561#ifdef NTFS_RW
1566 .writepage = ntfs_writepage, /* Write dirty page to disk. */ 1562 .writepage = ntfs_writepage, /* Write dirty page to disk. */
1567 .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty 1563 .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 6551c7cbad9..ef9ed854255 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -698,8 +698,7 @@ lock_retry_remap:
698 "uptodate! Unplugging the disk queue " 698 "uptodate! Unplugging the disk queue "
699 "and rescheduling."); 699 "and rescheduling.");
700 get_bh(tbh); 700 get_bh(tbh);
701 blk_run_address_space(mapping); 701 io_schedule();
702 schedule();
703 put_bh(tbh); 702 put_bh(tbh);
704 if (unlikely(!buffer_uptodate(tbh))) 703 if (unlikely(!buffer_uptodate(tbh)))
705 goto read_err; 704 goto read_err;
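
Without per-queue plugging there is no queue to run by hand here, so the retry path just sleeps until the buffer IO completes. From the new side of the hunk (the log text in the context above still talks about unplugging; this patch leaves that string alone):

	get_bh(tbh);
	io_schedule();
	put_bh(tbh);
	if (unlikely(!buffer_uptodate(tbh)))
		goto read_err;
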
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1fbb0e20131..daea0359e97 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2043,7 +2043,6 @@ const struct address_space_operations ocfs2_aops = {
2043 .write_begin = ocfs2_write_begin, 2043 .write_begin = ocfs2_write_begin,
2044 .write_end = ocfs2_write_end, 2044 .write_end = ocfs2_write_end,
2045 .bmap = ocfs2_bmap, 2045 .bmap = ocfs2_bmap,
2046 .sync_page = block_sync_page,
2047 .direct_IO = ocfs2_direct_IO, 2046 .direct_IO = ocfs2_direct_IO,
2048 .invalidatepage = ocfs2_invalidatepage, 2047 .invalidatepage = ocfs2_invalidatepage,
2049 .releasepage = ocfs2_releasepage, 2048 .releasepage = ocfs2_releasepage,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index b108e863d8f..1adab287bd2 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
367static void o2hb_wait_on_io(struct o2hb_region *reg, 367static void o2hb_wait_on_io(struct o2hb_region *reg,
368 struct o2hb_bio_wait_ctxt *wc) 368 struct o2hb_bio_wait_ctxt *wc)
369{ 369{
370 struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
371
372 blk_run_address_space(mapping);
373 o2hb_bio_wait_dec(wc, 1); 370 o2hb_bio_wait_dec(wc, 1);
374
375 wait_for_completion(&wc->wc_io_complete); 371 wait_for_completion(&wc->wc_io_complete);
376} 372}
377 373
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 8a6d34fa668..d738a7e493d 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = {
372 .readpages = omfs_readpages, 372 .readpages = omfs_readpages,
373 .writepage = omfs_writepage, 373 .writepage = omfs_writepage,
374 .writepages = omfs_writepages, 374 .writepages = omfs_writepages,
375 .sync_page = block_sync_page,
376 .write_begin = omfs_write_begin, 375 .write_begin = omfs_write_begin,
377 .write_end = generic_write_end, 376 .write_end = generic_write_end,
378 .bmap = omfs_bmap, 377 .bmap = omfs_bmap,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 9c21119512b..ac546975031 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -290,7 +290,8 @@ ssize_t part_inflight_show(struct device *dev,
290{ 290{
291 struct hd_struct *p = dev_to_part(dev); 291 struct hd_struct *p = dev_to_part(dev);
292 292
293 return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); 293 return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
294 atomic_read(&p->in_flight[1]));
294} 295}
295 296
296#ifdef CONFIG_FAIL_MAKE_REQUEST 297#ifdef CONFIG_FAIL_MAKE_REQUEST
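
The per-partition in_flight counters are now atomic, so the sysfs show routine reads them with atomic_read(); from the new side of the hunk:

	struct hd_struct *p = dev_to_part(dev);

	return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
		atomic_read(&p->in_flight[1]));
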
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index e63b4171d58..2b0646613f5 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
335static const struct address_space_operations qnx4_aops = { 335static const struct address_space_operations qnx4_aops = {
336 .readpage = qnx4_readpage, 336 .readpage = qnx4_readpage,
337 .writepage = qnx4_writepage, 337 .writepage = qnx4_writepage,
338 .sync_page = block_sync_page,
339 .write_begin = qnx4_write_begin, 338 .write_begin = qnx4_write_begin,
340 .write_end = generic_write_end, 339 .write_end = generic_write_end,
341 .bmap = qnx4_bmap 340 .bmap = qnx4_bmap
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1bba24bad82..4fd5bb33dbb 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3217,7 +3217,6 @@ const struct address_space_operations reiserfs_address_space_operations = {
3217 .readpages = reiserfs_readpages, 3217 .readpages = reiserfs_readpages,
3218 .releasepage = reiserfs_releasepage, 3218 .releasepage = reiserfs_releasepage,
3219 .invalidatepage = reiserfs_invalidatepage, 3219 .invalidatepage = reiserfs_invalidatepage,
3220 .sync_page = block_sync_page,
3221 .write_begin = reiserfs_write_begin, 3220 .write_begin = reiserfs_write_begin,
3222 .write_end = reiserfs_write_end, 3221 .write_end = reiserfs_write_end,
3223 .bmap = reiserfs_aop_bmap, 3222 .bmap = reiserfs_aop_bmap,
diff --git a/fs/super.c b/fs/super.c
index e8486490826..8a06881b192 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
71#else 71#else
72 INIT_LIST_HEAD(&s->s_files); 72 INIT_LIST_HEAD(&s->s_files);
73#endif 73#endif
74 s->s_bdi = &default_backing_dev_info;
74 INIT_LIST_HEAD(&s->s_instances); 75 INIT_LIST_HEAD(&s->s_instances);
75 INIT_HLIST_BL_HEAD(&s->s_anon); 76 INIT_HLIST_BL_HEAD(&s->s_anon);
76 INIT_LIST_HEAD(&s->s_inodes); 77 INIT_LIST_HEAD(&s->s_inodes);
@@ -936,6 +937,7 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
936 sb = root->d_sb; 937 sb = root->d_sb;
937 BUG_ON(!sb); 938 BUG_ON(!sb);
938 WARN_ON(!sb->s_bdi); 939 WARN_ON(!sb->s_bdi);
940 WARN_ON(sb->s_bdi == &default_backing_dev_info);
939 sb->s_flags |= MS_BORN; 941 sb->s_flags |= MS_BORN;
940 942
941 error = security_sb_kern_mount(sb, flags, secdata); 943 error = security_sb_kern_mount(sb, flags, secdata);
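
Every superblock now starts out pointing at default_backing_dev_info instead of carrying a NULL s_bdi, and mount_fs() warns if a filesystem never replaced that placeholder with its own bdi. From the new side of the two hunks:

	/* alloc_super() */
	s->s_bdi = &default_backing_dev_info;

	/* mount_fs() */
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

This is what allows the sync code below to drop its NULL checks on sb->s_bdi.
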
diff --git a/fs/sync.c b/fs/sync.c
index 92ca208777d..c38ec163da6 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -34,7 +34,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
34 * This should be safe, as we require bdi backing to actually 34 * This should be safe, as we require bdi backing to actually
35 * write out data in the first place 35 * write out data in the first place
36 */ 36 */
37 if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info) 37 if (sb->s_bdi == &noop_backing_dev_info)
38 return 0; 38 return 0;
39 39
40 if (sb->s_qcop && sb->s_qcop->quota_sync) 40 if (sb->s_qcop && sb->s_qcop->quota_sync)
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem);
80 80
81static void sync_one_sb(struct super_block *sb, void *arg) 81static void sync_one_sb(struct super_block *sb, void *arg)
82{ 82{
83 if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi) 83 if (!(sb->s_flags & MS_RDONLY))
84 __sync_filesystem(sb, *(int *)arg); 84 __sync_filesystem(sb, *(int *)arg);
85} 85}
86/* 86/*
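
With s_bdi guaranteed non-NULL, the sync paths only need to skip superblocks sitting on the no-op bdi; from the new side of the hunks:

	/* __sync_filesystem() */
	if (sb->s_bdi == &noop_backing_dev_info)
		return 0;

	/* sync_one_sb() */
	if (!(sb->s_flags & MS_RDONLY))
		__sync_filesystem(sb, *(int *)arg);
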
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 9ca66276315..fa8d43c92bb 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
488const struct address_space_operations sysv_aops = { 488const struct address_space_operations sysv_aops = {
489 .readpage = sysv_readpage, 489 .readpage = sysv_readpage,
490 .writepage = sysv_writepage, 490 .writepage = sysv_writepage,
491 .sync_page = block_sync_page,
492 .write_begin = sysv_write_begin, 491 .write_begin = sysv_write_begin,
493 .write_end = generic_write_end, 492 .write_end = generic_write_end,
494 .bmap = sysv_bmap 493 .bmap = sysv_bmap
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index e5dc1e120e8..6ddd9973e68 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2011,7 +2011,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
2011 */ 2011 */
2012 c->bdi.name = "ubifs", 2012 c->bdi.name = "ubifs",
2013 c->bdi.capabilities = BDI_CAP_MAP_COPY; 2013 c->bdi.capabilities = BDI_CAP_MAP_COPY;
2014 c->bdi.unplug_io_fn = default_unplug_io_fn;
2015 err = bdi_init(&c->bdi); 2014 err = bdi_init(&c->bdi);
2016 if (err) 2015 if (err)
2017 goto out_close; 2016 goto out_close;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index f391a2adc69..2a346bb1d9f 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file,
98const struct address_space_operations udf_adinicb_aops = { 98const struct address_space_operations udf_adinicb_aops = {
99 .readpage = udf_adinicb_readpage, 99 .readpage = udf_adinicb_readpage,
100 .writepage = udf_adinicb_writepage, 100 .writepage = udf_adinicb_writepage,
101 .sync_page = block_sync_page,
102 .write_begin = simple_write_begin, 101 .write_begin = simple_write_begin,
103 .write_end = udf_adinicb_write_end, 102 .write_end = udf_adinicb_write_end,
104}; 103};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ccc81432141..1d1358ed80c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -140,7 +140,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
140const struct address_space_operations udf_aops = { 140const struct address_space_operations udf_aops = {
141 .readpage = udf_readpage, 141 .readpage = udf_readpage,
142 .writepage = udf_writepage, 142 .writepage = udf_writepage,
143 .sync_page = block_sync_page,
144 .write_begin = udf_write_begin, 143 .write_begin = udf_write_begin,
145 .write_end = generic_write_end, 144 .write_end = generic_write_end,
146 .bmap = udf_bmap, 145 .bmap = udf_bmap,
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 03c255f12df..27a4babe7df 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -552,7 +552,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
552const struct address_space_operations ufs_aops = { 552const struct address_space_operations ufs_aops = {
553 .readpage = ufs_readpage, 553 .readpage = ufs_readpage,
554 .writepage = ufs_writepage, 554 .writepage = ufs_writepage,
555 .sync_page = block_sync_page,
556 .write_begin = ufs_write_begin, 555 .write_begin = ufs_write_begin,
557 .write_end = generic_write_end, 556 .write_end = generic_write_end,
558 .bmap = ufs_bmap 557 .bmap = ufs_bmap
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index e56a4f56721..11014302c9c 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -479,7 +479,7 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
479 break; 479 break;
480 if (IS_SYNC(inode) && (inode->i_state & I_DIRTY)) 480 if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
481 ufs_sync_inode (inode); 481 ufs_sync_inode (inode);
482 blk_run_address_space(inode->i_mapping); 482 blk_flush_plug(current);
483 yield(); 483 yield();
484 } 484 }
485 485
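
blk_run_address_space() is gone; a task that wants its queued writes pushed out before yielding now flushes its own plug. From the new side of the hunk:

	if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
		ufs_sync_inode (inode);
	blk_flush_plug(current);
	yield();
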
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 8c5c8727745..52dbd14260b 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -413,8 +413,7 @@ xfs_submit_ioend_bio(
413 if (xfs_ioend_new_eof(ioend)) 413 if (xfs_ioend_new_eof(ioend))
414 xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); 414 xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
415 415
416 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? 416 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
417 WRITE_SYNC_PLUG : WRITE, bio);
418} 417}
419 418
420STATIC struct bio * 419STATIC struct bio *
@@ -1495,7 +1494,6 @@ const struct address_space_operations xfs_address_space_operations = {
1495 .readpages = xfs_vm_readpages, 1494 .readpages = xfs_vm_readpages,
1496 .writepage = xfs_vm_writepage, 1495 .writepage = xfs_vm_writepage,
1497 .writepages = xfs_vm_writepages, 1496 .writepages = xfs_vm_writepages,
1498 .sync_page = block_sync_page,
1499 .releasepage = xfs_vm_releasepage, 1497 .releasepage = xfs_vm_releasepage,
1500 .invalidatepage = xfs_vm_invalidatepage, 1498 .invalidatepage = xfs_vm_invalidatepage,
1501 .write_begin = xfs_vm_write_begin, 1499 .write_begin = xfs_vm_write_begin,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5cb230f2cb4..c05324d3282 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -990,7 +990,7 @@ xfs_buf_lock(
990 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) 990 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
991 xfs_log_force(bp->b_target->bt_mount, 0); 991 xfs_log_force(bp->b_target->bt_mount, 0);
992 if (atomic_read(&bp->b_io_remaining)) 992 if (atomic_read(&bp->b_io_remaining))
993 blk_run_address_space(bp->b_target->bt_mapping); 993 blk_flush_plug(current);
994 down(&bp->b_sema); 994 down(&bp->b_sema);
995 XB_SET_OWNER(bp); 995 XB_SET_OWNER(bp);
996 996
@@ -1034,9 +1034,7 @@ xfs_buf_wait_unpin(
1034 set_current_state(TASK_UNINTERRUPTIBLE); 1034 set_current_state(TASK_UNINTERRUPTIBLE);
1035 if (atomic_read(&bp->b_pin_count) == 0) 1035 if (atomic_read(&bp->b_pin_count) == 0)
1036 break; 1036 break;
1037 if (atomic_read(&bp->b_io_remaining)) 1037 io_schedule();
1038 blk_run_address_space(bp->b_target->bt_mapping);
1039 schedule();
1040 } 1038 }
1041 remove_wait_queue(&bp->b_waiters, &wait); 1039 remove_wait_queue(&bp->b_waiters, &wait);
1042 set_current_state(TASK_RUNNING); 1040 set_current_state(TASK_RUNNING);
@@ -1442,7 +1440,7 @@ xfs_buf_iowait(
1442 trace_xfs_buf_iowait(bp, _RET_IP_); 1440 trace_xfs_buf_iowait(bp, _RET_IP_);
1443 1441
1444 if (atomic_read(&bp->b_io_remaining)) 1442 if (atomic_read(&bp->b_io_remaining))
1445 blk_run_address_space(bp->b_target->bt_mapping); 1443 blk_flush_plug(current);
1446 wait_for_completion(&bp->b_iowait); 1444 wait_for_completion(&bp->b_iowait);
1447 1445
1448 trace_xfs_buf_iowait_done(bp, _RET_IP_); 1446 trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1666,7 +1664,6 @@ xfs_mapping_buftarg(
1666 struct inode *inode; 1664 struct inode *inode;
1667 struct address_space *mapping; 1665 struct address_space *mapping;
1668 static const struct address_space_operations mapping_aops = { 1666 static const struct address_space_operations mapping_aops = {
1669 .sync_page = block_sync_page,
1670 .migratepage = fail_migrate_page, 1667 .migratepage = fail_migrate_page,
1671 }; 1668 };
1672 1669
@@ -1947,7 +1944,7 @@ xfsbufd(
1947 count++; 1944 count++;
1948 } 1945 }
1949 if (count) 1946 if (count)
1950 blk_run_address_space(target->bt_mapping); 1947 blk_flush_plug(current);
1951 1948
1952 } while (!kthread_should_stop()); 1949 } while (!kthread_should_stop());
1953 1950
@@ -1995,7 +1992,7 @@ xfs_flush_buftarg(
1995 1992
1996 if (wait) { 1993 if (wait) {
1997 /* Expedite and wait for IO to complete. */ 1994 /* Expedite and wait for IO to complete. */
1998 blk_run_address_space(target->bt_mapping); 1995 blk_flush_plug(current);
1999 while (!list_empty(&wait_list)) { 1996 while (!list_empty(&wait_list)) {
2000 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 1997 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2001 1998
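
xfs_buf follows the same recipe: where it used to run the buftarg's address space it now either flushes the current task's plug before waiting, or simply sleeps. Reassembled fragments from the new side of the hunks:

	/* xfs_buf_lock() and xfs_buf_iowait() */
	if (atomic_read(&bp->b_io_remaining))
		blk_flush_plug(current);

	/* xfs_buf_wait_unpin(): the wait loop now just sleeps */
	io_schedule();

xfsbufd() and xfs_flush_buftarg() likewise call blk_flush_plug(current) once their delwri submissions are queued, and the private mapping_aops in xfs_mapping_buftarg() loses its .sync_page entry, leaving only .migratepage = fail_migrate_page.
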