author     Christoph Hellwig <hch@lst.de>  2016-11-01 09:40:10 -0400
committer  Jens Axboe <axboe@fb.com>       2016-11-01 11:43:26 -0400
commit     70fd76140a6cb63262bd47b68d57b42e889c10ee
tree       0590b2ef1b89b6af6abb8da9d23d5d87991d74c8
parent     a2b809672ee6fcb4d5756ea815725b3dbaea654e
block,fs: use REQ_* flags directly
Remove the WRITE_* and READ_SYNC wrappers, and just use the flags
directly. Where applicable this also drops usage of the
bio_set_op_attrs wrapper.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
53 files changed, 133 insertions, 182 deletions
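For context: the removed wrappers were shorthand for combinations of REQ_* flags. A rough sketch of the pre-patch definitions from include/linux/fs.h follows (reconstructed from memory for orientation; the exact values at this tree state are an assumption, not quoted from the patch):

/*
 * Approximate pre-patch wrapper definitions (include/linux/fs.h).
 * Reconstructed for reference only -- not part of this commit.
 */
#define READ_SYNC		0
#define WRITE_SYNC		REQ_SYNC
#define WRITE_ODIRECT		(REQ_SYNC | REQ_IDLE)
#define WRITE_FLUSH		(REQ_SYNC | REQ_PREFLUSH)
#define WRITE_FUA		(REQ_SYNC | REQ_FUA)
#define WRITE_FLUSH_FUA		(REQ_SYNC | REQ_PREFLUSH | REQ_FUA)

Note that the conversions below are not always literal expansions: empty flush bios, for example, become plain REQ_PREFLUSH without the REQ_SYNC bit the wrapper carried, and READ_SYNC (0 by this point) simply disappears.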
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 95f1d4d357df..d35beca18481 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
+	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
@@ -486,7 +486,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_bdev = bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	ret = submit_bio_wait(bio);
 
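The blkdev_issue_flush() hunk above also shows the bio_set_op_attrs() removal that repeats through the drivers below: with the flags spelled out, the helper adds nothing over a direct assignment, since the REQ_OP_* value and the REQ_* flags share the bi_opf word. A minimal sketch of the pattern, using the names from the hunk above:

/* Before: helper packs the op and the flags into bio->bi_opf. */
bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);

/* After: one direct assignment of op and flags. */
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;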
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 942384f34e22..a89538cb3eaa 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
 	bio->bi_bdev = device->ldev->backing_bdev;
 	bio->bi_private = octx;
 	bio->bi_end_io = one_flush_endio;
-	bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
 
 	device->flush_jif = jiffies;
 	set_bit(FLUSH_PENDING, &device->flags);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee752597..726c32e35db9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	case BLKIF_OP_WRITE:
 		ring->st_wr_req++;
 		operation = REQ_OP_WRITE;
-		operation_flags = WRITE_ODIRECT;
+		operation_flags = REQ_SYNC | REQ_IDLE;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
 		drain = true;
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		ring->st_f_req++;
 		operation = REQ_OP_WRITE;
-		operation_flags = WRITE_FLUSH;
+		operation_flags = REQ_PREFLUSH;
 		break;
 	default:
 		operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	nseg = req->operation == BLKIF_OP_INDIRECT ?
 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-	if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	}
 
 	/* Wait on all outstanding I/O's and once that has been completed
-	 * issue the WRITE_FLUSH.
+	 * issue the flush.
 	 */
 	if (drain)
 		xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
 	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation_flags != WRITE_FLUSH);
+		BUG_ON(operation_flags != REQ_PREFLUSH);
 
 		bio = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db40cd7b..6fdd8e252760 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 
 	bch_bio_map(bio, b->keys.set[0].data);
 
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-	bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+	b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
 	bch_bio_map(b->bio, i);
 
 	/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 333a1e5f6ae6..1c9130ae0073 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
 	bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 	bch_bio_map(bio, sorted);
 
 	submit_bio_wait(bio);
@@ -113,7 +113,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 	check = bio_clone(bio, GFP_NOIO);
 	if (!check)
 		return;
-	bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+	check->bi_opf = REQ_OP_READ;
 
 	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e8a2b693c928..0d99b5f4b3e6 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		flush->bi_bdev = bio->bi_bdev;
 		flush->bi_end_io = request_endio;
 		flush->bi_private = cl;
-		bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+		flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 		closure_bio_submit(flush, cl);
 	}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad441cd76..988edf928466 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
-	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+	uuid_io(c, REQ_OP_READ, 0, k, cl);
 
 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 		struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 		ca->prio_last_buckets[bucket_nr] = bucket;
 		bucket_nr++;
 
-		prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+		prio_io(ca, bucket, REQ_OP_READ, 0);
 
 		if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 			pr_warn("bad csum reading priorities");
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc3875f..b3ba142e59a4 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1316,7 +1316,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 07fc1ad42ec5..33e71ea6cc14 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
 	};
 
 	lc->io_req.bi_op = REQ_OP_WRITE;
-	lc->io_req.bi_op_flags = WRITE_FLUSH;
+	lc->io_req.bi_op_flags = REQ_PREFLUSH;
 
 	return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f67bc..1a176d7c8b90 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -261,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956b577b..b93476c3ba3f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
 	/*
 	 * Commit exceptions to disk.
 	 */
-	if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
 		ps->valid = 0;
 
 	/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	for (i = 0; i < nr_merged; i++)
 		clear_exception(ps, ps->current_committed - 1 - i);
 
-	r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 147af9536d0c..b2abfa41af3e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1527,7 +1527,7 @@ static struct mapped_device *alloc_dev(int minor)
 
 	bio_init(&md->flush_bio);
 	md->flush_bio.bi_bdev = md->bdev;
-	bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	dm_stats_init(&md->stats);
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..b69ec7da4bae 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -394,7 +394,7 @@ static void submit_flushes(struct work_struct *ws)
 			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
-			bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 			atomic_inc(&mddev->flush_pending);
 			submit_bio(bi);
 			rcu_read_lock();
@@ -743,7 +743,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 
 	atomic_inc(&mddev->pending_writes);
 	submit_bio(bio);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..28d015c6fffe 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -685,7 +685,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 	bio_reset(&log->flush_bio);
 	log->flush_bio.bi_bdev = log->rdev->bdev;
 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
-	bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	submit_bio(&log->flush_bio);
 }
 
@@ -1053,7 +1053,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	mb->checksum = cpu_to_le32(crc);
 
 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-			  WRITE_FUA, false)) {
+			  REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 92ac251e91e6..70acdd379e44 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -913,7 +913,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 			op = REQ_OP_WRITE;
 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			if (test_bit(R5_Discard, &sh->dev[i].flags))
 				op = REQ_OP_DISCARD;
 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4a96c2049b7b..c2784cfc5e29 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -58,7 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
 	if (req->cmd->rw.opcode == nvme_cmd_write) {
 		op = REQ_OP_WRITE;
-		op_flags = WRITE_ODIRECT;
+		op_flags = REQ_SYNC | REQ_IDLE;
 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
 			op_flags |= REQ_FUA;
 	} else {
@@ -109,7 +109,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 	bio->bi_bdev = req->ns->bdev;
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	submit_bio(bio);
 }
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 372d744315f3..d316ed537d59 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	if (!immed)
 		bio->bi_private = cmd;
 	submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
-		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * Force writethrough using REQ_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
 		op = REQ_OP_WRITE;
 		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 		}
 	} else {
 		op = REQ_OP_READ;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c8454a8e35f2..fe10afd51e02 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3485,9 +3485,9 @@ static int write_dev_supers(struct btrfs_device *device,
 		 * to go down lazy.
 		 */
 		if (i == 0)
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
 		else
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 		if (ret)
 			errors++;
 	}
@@ -3551,7 +3551,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	init_completion(&device->flush_wait);
 	bio->bi_private = &device->flush_wait;
 	device->flush_bio = bio;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 66a755150056..ff87bff7bdb6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -127,7 +127,7 @@ struct extent_page_data {
 	 */
 	unsigned int extent_locked:1;
 
-	/* tells the submit_bio code to use a WRITE_SYNC */
+	/* tells the submit_bio code to use REQ_SYNC */
 	unsigned int sync_io:1;
 };
 
@@ -2047,7 +2047,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 		return -EIO;
 	}
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	bio_add_page(bio, page, length, pg_offset);
 
 	if (btrfsic_submit_bio_wait(bio)) {
@@ -2388,7 +2388,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct bio *bio;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2404,9 +2404,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	}
 
 	if (failed_bio->bi_vcnt > 1)
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 
 	phy_offset >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
@@ -3484,7 +3482,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned long nr_written = 0;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = WRITE_SYNC;
+		write_flags = REQ_SYNC;
 
 	trace___extent_writepage(page, inode, wbc);
 
@@ -3729,7 +3727,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
 	unsigned long start, end;
-	int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
+	int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4076,7 +4074,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 	int ret;
 
 	bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-			 epd->sync_io ? WRITE_SYNC : 0);
+			 epd->sync_io ? REQ_SYNC : 0);
 
 	ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
 	BUG_ON(ret < 0); /* -ENOMEM */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9a377079af26..c8eb82a416b3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7917,7 +7917,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	struct io_failure_record *failrec;
 	struct bio *bio;
 	int isector;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -7936,9 +7936,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	if ((failed_bio->bi_vcnt > 1)
 	    || (failed_bio->bi_io_vec->bv_len
 		> BTRFS_I(inode)->root->sectorsize))
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 
 	isector = start - btrfs_io_bio(failed_bio)->logical;
 	isector >>= inode->i_sb->s_blocksize_bits;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fffb9ab8526e..ff3078234d94 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -4440,7 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
 	if (ret != PAGE_SIZE) {
 leave_with_eio:
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index deda46cf1292..0d7d635d8bfb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6023,7 +6023,7 @@ static void btrfs_end_bio(struct bio *bio)
 			else
 				btrfs_dev_stat_inc(dev,
 					BTRFS_DEV_STAT_READ_ERRS);
-			if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
+			if (bio->bi_opf & REQ_PREFLUSH)
 				btrfs_dev_stat_inc(dev,
 					BTRFS_DEV_STAT_FLUSH_ERRS);
 			btrfs_dev_stat_print_on_error(dev);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 09ed29c67848..f137ffe6654c 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -62,7 +62,7 @@ struct btrfs_device {
 	int running_pending;
 	/* regular prio bios */
 	struct btrfs_pending_bios pending_bios;
-	/* WRITE_SYNC bios */
+	/* sync bios */
 	struct btrfs_pending_bios pending_sync_bios;
 
 	struct block_device *bdev;
diff --git a/fs/buffer.c b/fs/buffer.c
index a29335867e30..bc7c2bb30a9b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -753,7 +753,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC);
+				write_dirty_buffer(bh, REQ_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -1684,7 +1684,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	unsigned int blocksize, bbits;
 	int nr_underway = 0;
-	int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+	int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 
 	head = create_page_buffers(page, inode,
 				   (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -3210,7 +3210,7 @@ EXPORT_SYMBOL(__sync_dirty_buffer);
 
 int sync_dirty_buffer(struct buffer_head *bh)
 {
-	return __sync_dirty_buffer(bh, WRITE_SYNC);
+	return __sync_dirty_buffer(bh, REQ_SYNC);
 }
 EXPORT_SYMBOL(sync_dirty_buffer);
 
diff --git a/fs/direct-io.c b/fs/direct-io.c
index fb9aa16a7727..a5138c564019 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1209,7 +1209,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio->inode = inode;
 	if (iov_iter_rw(iter) == WRITE) {
 		dio->op = REQ_OP_WRITE;
-		dio->op_flags = WRITE_ODIRECT;
+		dio->op_flags = REQ_SYNC | REQ_IDLE;
 	} else {
 		dio->op = REQ_OP_READ;
 	}
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d89754ef1aab..eb9835638680 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -35,7 +35,7 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
 }
 
 /*
- * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * Write the MMP block using REQ_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+	submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
 	wait_on_buffer(bh);
 	sb_end_write(sb);
 	if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
 	get_bh(*bh);
 	lock_buffer(*bh);
 	(*bh)->b_end_io = end_buffer_read_sync;
-	submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
+	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
 	wait_on_buffer(*bh);
 	if (!buffer_uptodate(*bh)) {
 		ret = -EIO;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0094923e5ebf..e0b3b54cdef3 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -340,7 +340,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
 
 	if (bio) {
 		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
-				  WRITE_SYNC : 0;
+				  REQ_SYNC : 0;
 		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
 		submit_bio(io->io_bio);
 	}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6db81fbcbaa6..f31eb286af90 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4553,7 +4553,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	unlock_buffer(sbh);
 	if (sync) {
 		error = __sync_dirty_buffer(sbh,
-			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
+			test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
 		if (error)
 			return error;
 
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 7e9b504bd8b2..d935c06a84f0 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -65,7 +65,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = READ_SYNC | REQ_META | REQ_PRIO,
+		.op_flags = REQ_META | REQ_PRIO,
 		.old_blkaddr = index,
 		.new_blkaddr = index,
 		.encrypted_page = NULL,
@@ -160,7 +160,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
+		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 	};
 	struct blk_plug plug;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9ae194fd2fdb..b80bf10603d7 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -198,11 +198,9 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 	if (type >= META_FLUSH) {
 		io->fio.type = META_FLUSH;
 		io->fio.op = REQ_OP_WRITE;
-		if (test_opt(sbi, NOBARRIER))
-			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
-		else
-			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
-								REQ_PRIO;
+		io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
+		if (!test_opt(sbi, NOBARRIER))
+			io->fio.op_flags |= REQ_FUA;
 	}
 	__submit_merged_bio(io);
 out:
@@ -483,7 +481,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
 		return page;
 	f2fs_put_page(page, 0);
 
-	page = get_read_data_page(inode, index, READ_SYNC, false);
+	page = get_read_data_page(inode, index, 0, false);
 	if (IS_ERR(page))
 		return page;
 
@@ -509,7 +507,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
repeat:
-	page = get_read_data_page(inode, index, READ_SYNC, for_write);
+	page = get_read_data_page(inode, index, 0, for_write);
 	if (IS_ERR(page))
 		return page;
 
@@ -1251,7 +1249,7 @@ static int f2fs_write_data_page(struct page *page,
 		.sbi = sbi,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
 		.page = page,
 		.encrypted_page = NULL,
 	};
@@ -1663,7 +1661,7 @@ repeat:
 		err = PTR_ERR(bio);
 		goto fail;
 	}
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+	bio->bi_opf = REQ_OP_READ;
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		err = -EFAULT;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 93985c64d8a8..9eb11b2244ea 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -550,7 +550,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 		.sbi = F2FS_I_SB(inode),
 		.type = DATA,
 		.op = REQ_OP_READ,
-		.op_flags = READ_SYNC,
+		.op_flags = 0,
 		.encrypted_page = NULL,
 	};
 	struct dnode_of_data dn;
@@ -625,7 +625,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
 	fio.op = REQ_OP_WRITE;
-	fio.op_flags = WRITE_SYNC;
+	fio.op_flags = REQ_SYNC;
 	fio.new_blkaddr = newaddr;
 	f2fs_submit_page_mbio(&fio);
 
@@ -663,7 +663,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 		.sbi = F2FS_I_SB(inode),
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC,
+		.op_flags = REQ_SYNC,
 		.page = page,
 		.encrypted_page = NULL,
 	};
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 5f1a67f756af..2e7f54c191b4 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -111,7 +111,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 		.sbi = F2FS_I_SB(dn->inode),
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_PRIO,
 		.page = page,
 		.encrypted_page = NULL,
 	};
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 01177ecdeab8..932f3f8bb57b 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1134,7 +1134,7 @@ repeat:
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	err = read_node_page(page, READ_SYNC);
+	err = read_node_page(page, 0);
 	if (err < 0) {
 		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
@@ -1570,7 +1570,7 @@ static int f2fs_write_node_page(struct page *page,
 		.sbi = sbi,
 		.type = NODE,
 		.op = REQ_OP_WRITE,
-		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
 		.page = page,
 		.encrypted_page = NULL,
 	};
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fc886f008449..f1b4a1775ebe 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -259,7 +259,7 @@ static int __commit_inmem_pages(struct inode *inode,
 		.sbi = sbi,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
 	bool submit_bio = false;
@@ -420,7 +420,7 @@ repeat:
 	fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
 	bio->bi_bdev = sbi->sb->s_bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	ret = submit_bio_wait(bio);
 
 	llist_for_each_entry_safe(cmd, next,
@@ -454,7 +454,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 
 	atomic_inc(&fcc->submit_flush);
 	bio->bi_bdev = sbi->sb->s_bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	ret = submit_bio_wait(bio);
 	atomic_dec(&fcc->submit_flush);
 	bio_put(bio);
@@ -1515,7 +1515,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
 		.old_blkaddr = page->index,
 		.new_blkaddr = page->index,
 		.page = page,
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6132b4ce4e4c..2cac6bb86080 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1238,7 +1238,7 @@ static int __f2fs_commit_super(struct buffer_head *bh,
 	unlock_buffer(bh);
 
 	/* it's rare case, we can do fua all the time */
-	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+	return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
 }
 
 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e58ccef09c91..27c00a16def0 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	struct gfs2_log_header *lh;
 	unsigned int tail;
 	u32 hash;
-	int op_flags = WRITE_FLUSH_FUA | REQ_META;
+	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 	lh = page_address(page);
@@ -682,7 +682,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
-		op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
+		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 	}
 
 	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 373639a59782..e562b1191c9c 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -38,7 +38,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
 	int write_flags = REQ_META | REQ_PRIO |
-			  (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+			  (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
@@ -285,7 +285,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		}
 	}
 
-	gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
 	if (!(flags & DIO_WAIT))
 		return 0;
 
@@ -453,7 +453,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
+		ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh);
 
 	dblock++;
 	extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff72ac6439c8..a34308df927f 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -246,7 +246,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
 	bio->bi_end_io = end_bio_io_page;
 	bio->bi_private = page;
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
 	submit_bio(bio);
 	wait_on_page_locked(page);
 	bio_put(bio);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 11854dd84572..67aedf4c2e7c 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -221,7 +221,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	error2 = hfsplus_submit_bio(sb,
 				    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
 				    sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
-				    WRITE_SYNC);
+				    REQ_SYNC);
 	if (!error)
 		error = error2;
 	if (!write_backup)
@@ -230,7 +230,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	error2 = hfsplus_submit_bio(sb,
 				    sbi->part_start + sbi->sect_count - 2,
 				    sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
-				    WRITE_SYNC);
+				    REQ_SYNC);
 	if (!error)
 		error2 = error;
 out:
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 684996c8a3a4..4055f51617ef 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -186,7 +186,7 @@ __flush_batch(journal_t *journal, int *batch_count)
 
 	blk_start_plug(&plug);
 	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
 	blk_finish_plug(&plug);
 
 	for (i = 0; i < *batch_count; i++) {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 31f8ca046639..8c514367ba5a 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -155,9 +155,10 @@ static int journal_submit_commit_record(journal_t *journal,
 
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !jbd2_has_feature_async_commit(journal))
-		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+		ret = submit_bh(REQ_OP_WRITE,
+				REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
 	else
-		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 
 	*cbh = bh;
 	return ret;
@@ -402,7 +403,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		jbd2_journal_update_sb_log_tail(journal,
 						journal->j_tail_sequence,
 						journal->j_tail,
-						WRITE_SYNC);
+						REQ_SYNC);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	} else {
 		jbd_debug(3, "superblock not updated\n");
@@ -717,7 +718,7 @@ start_journal_io:
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			bh->b_end_io = journal_end_buffer_io_sync;
-			submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 		}
 		cond_resched();
 		stats.run.rs_blocks_logged += bufs;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 927da4956a89..8ed971eeab44 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -913,7 +913,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) | |||
913 | * space and if we lose sb update during power failure we'd replay | 913 | * space and if we lose sb update during power failure we'd replay |
914 | * old transaction with possibly newly overwritten data. | 914 | * old transaction with possibly newly overwritten data. |
915 | */ | 915 | */ |
916 | ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA); | 916 | ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA); |
917 | if (ret) | 917 | if (ret) |
918 | goto out; | 918 | goto out; |
919 | 919 | ||
@@ -1306,7 +1306,7 @@ static int journal_reset(journal_t *journal) | |||
1306 | /* Lock here to make assertions happy... */ | 1306 | /* Lock here to make assertions happy... */ |
1307 | mutex_lock(&journal->j_checkpoint_mutex); | 1307 | mutex_lock(&journal->j_checkpoint_mutex); |
1308 | /* | 1308 | /* |
1309 | * Update log tail information. We use WRITE_FUA since new | 1309 | * Update log tail information. We use REQ_FUA since new |
1310 | * transaction will start reusing journal space and so we | 1310 | * transaction will start reusing journal space and so we |
1311 | * must make sure information about current log tail is on | 1311 | * must make sure information about current log tail is on |
1312 | * disk before that. | 1312 | * disk before that. |
@@ -1314,7 +1314,7 @@ static int journal_reset(journal_t *journal) | |||
1314 | jbd2_journal_update_sb_log_tail(journal, | 1314 | jbd2_journal_update_sb_log_tail(journal, |
1315 | journal->j_tail_sequence, | 1315 | journal->j_tail_sequence, |
1316 | journal->j_tail, | 1316 | journal->j_tail, |
1317 | WRITE_FUA); | 1317 | REQ_FUA); |
1318 | mutex_unlock(&journal->j_checkpoint_mutex); | 1318 | mutex_unlock(&journal->j_checkpoint_mutex); |
1319 | } | 1319 | } |
1320 | return jbd2_journal_start_thread(journal); | 1320 | return jbd2_journal_start_thread(journal); |
@@ -1454,7 +1454,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal) | |||
1454 | sb->s_errno = cpu_to_be32(journal->j_errno); | 1454 | sb->s_errno = cpu_to_be32(journal->j_errno); |
1455 | read_unlock(&journal->j_state_lock); | 1455 | read_unlock(&journal->j_state_lock); |
1456 | 1456 | ||
1457 | jbd2_write_superblock(journal, WRITE_FUA); | 1457 | jbd2_write_superblock(journal, REQ_FUA); |
1458 | } | 1458 | } |
1459 | EXPORT_SYMBOL(jbd2_journal_update_sb_errno); | 1459 | EXPORT_SYMBOL(jbd2_journal_update_sb_errno); |
1460 | 1460 | ||
@@ -1720,7 +1720,8 @@ int jbd2_journal_destroy(journal_t *journal) | |||
1720 | ++journal->j_transaction_sequence; | 1720 | ++journal->j_transaction_sequence; |
1721 | write_unlock(&journal->j_state_lock); | 1721 | write_unlock(&journal->j_state_lock); |
1722 | 1722 | ||
1723 | jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA); | 1723 | jbd2_mark_journal_empty(journal, |
1724 | REQ_PREFLUSH | REQ_FUA); | ||
1724 | mutex_unlock(&journal->j_checkpoint_mutex); | 1725 | mutex_unlock(&journal->j_checkpoint_mutex); |
1725 | } else | 1726 | } else |
1726 | err = -EIO; | 1727 | err = -EIO; |
@@ -1979,7 +1980,7 @@ int jbd2_journal_flush(journal_t *journal) | |||
1979 | * the magic code for a fully-recovered superblock. Any future | 1980 | * the magic code for a fully-recovered superblock. Any future |
1980 | * commits of data to the journal will restore the current | 1981 | * commits of data to the journal will restore the current |
1981 | * s_start value. */ | 1982 | * s_start value. */ |
1982 | jbd2_mark_journal_empty(journal, WRITE_FUA); | 1983 | jbd2_mark_journal_empty(journal, REQ_FUA); |
1983 | mutex_unlock(&journal->j_checkpoint_mutex); | 1984 | mutex_unlock(&journal->j_checkpoint_mutex); |
1984 | write_lock(&journal->j_state_lock); | 1985 | write_lock(&journal->j_state_lock); |
1985 | J_ASSERT(!journal->j_running_transaction); | 1986 | J_ASSERT(!journal->j_running_transaction); |
@@ -2025,7 +2026,7 @@ int jbd2_journal_wipe(journal_t *journal, int write) | |||
2025 | if (write) { | 2026 | if (write) { |
2026 | /* Lock to make assertions happy... */ | 2027 | /* Lock to make assertions happy... */ |
2027 | mutex_lock(&journal->j_checkpoint_mutex); | 2028 | mutex_lock(&journal->j_checkpoint_mutex); |
2028 | jbd2_mark_journal_empty(journal, WRITE_FUA); | 2029 | jbd2_mark_journal_empty(journal, REQ_FUA); |
2029 | mutex_unlock(&journal->j_checkpoint_mutex); | 2030 | mutex_unlock(&journal->j_checkpoint_mutex); |
2030 | } | 2031 | } |
2031 | 2032 | ||
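Two flag choices recur in the journal.c hunks: REQ_FUA alone when only the superblock write itself must be durable before the tail area is reused, and REQ_PREFLUSH | REQ_FUA in jbd2_journal_destroy(), where everything written earlier must also reach stable storage before the superblock declares the journal empty. A hedged recap with a hypothetical helper name; the real call sites pass the flags directly:

#include <linux/blk_types.h>

static inline int journal_sb_write_flags_sketch(bool flush_prior_writes)
{
	/* PREFLUSH orders earlier journal writes before the superblock;
	 * FUA makes the superblock write itself durable. */
	return flush_prior_writes ? (REQ_PREFLUSH | REQ_FUA) : REQ_FUA;
}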
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index 91171dc352cb..cfc38b552118 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c | |||
@@ -648,7 +648,7 @@ static void flush_descriptor(journal_t *journal, | |||
648 | set_buffer_jwrite(descriptor); | 648 | set_buffer_jwrite(descriptor); |
649 | BUFFER_TRACE(descriptor, "write"); | 649 | BUFFER_TRACE(descriptor, "write"); |
650 | set_buffer_dirty(descriptor); | 650 | set_buffer_dirty(descriptor); |
651 | write_dirty_buffer(descriptor, WRITE_SYNC); | 651 | write_dirty_buffer(descriptor, REQ_SYNC); |
652 | } | 652 | } |
653 | #endif | 653 | #endif |
654 | 654 | ||
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index a21ea8b3e5fa..bb1da1feafeb 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -2002,7 +2002,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) | |||
2002 | 2002 | ||
2003 | bio->bi_end_io = lbmIODone; | 2003 | bio->bi_end_io = lbmIODone; |
2004 | bio->bi_private = bp; | 2004 | bio->bi_private = bp; |
2005 | bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC); | 2005 | bio->bi_opf = REQ_OP_READ; |
2006 | /*check if journaling to disk has been disabled*/ | 2006 | /*check if journaling to disk has been disabled*/ |
2007 | if (log->no_integrity) { | 2007 | if (log->no_integrity) { |
2008 | bio->bi_iter.bi_size = 0; | 2008 | bio->bi_iter.bi_size = 0; |
@@ -2146,7 +2146,7 @@ static void lbmStartIO(struct lbuf * bp) | |||
2146 | 2146 | ||
2147 | bio->bi_end_io = lbmIODone; | 2147 | bio->bi_end_io = lbmIODone; |
2148 | bio->bi_private = bp; | 2148 | bio->bi_private = bp; |
2149 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); | 2149 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; |
2150 | 2150 | ||
2151 | /* check if journaling to disk has been disabled */ | 2151 | /* check if journaling to disk has been disabled */ |
2152 | if (log->no_integrity) { | 2152 | if (log->no_integrity) { |
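The jfs read path loses its flag entirely because READ_SYNC was defined as 0, so bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC) was only ever a plain op assignment. A minimal sketch of the resulting bi_opf convention; lbm_set_opf_sketch() is hypothetical, since lbmRead() and lbmStartIO() set bi_opf inline:

#include <linux/bio.h>
#include <linux/blk_types.h>

static void lbm_set_opf_sketch(struct bio *bio, bool is_write)
{
	/* reads carry no modifier bits; log writes keep the sync hint */
	bio->bi_opf = is_write ? (REQ_OP_WRITE | REQ_SYNC) : REQ_OP_READ;
}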
diff --git a/fs/mpage.c b/fs/mpage.c index d2413af0823a..f35e2819d0c6 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | |||
489 | struct buffer_head map_bh; | 489 | struct buffer_head map_bh; |
490 | loff_t i_size = i_size_read(inode); | 490 | loff_t i_size = i_size_read(inode); |
491 | int ret = 0; | 491 | int ret = 0; |
492 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); | 492 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0); |
493 | 493 | ||
494 | if (page_has_buffers(page)) { | 494 | if (page_has_buffers(page)) { |
495 | struct buffer_head *head = page_buffers(page); | 495 | struct buffer_head *head = page_buffers(page); |
@@ -705,7 +705,7 @@ mpage_writepages(struct address_space *mapping, | |||
705 | ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); | 705 | ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); |
706 | if (mpd.bio) { | 706 | if (mpd.bio) { |
707 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? | 707 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? |
708 | WRITE_SYNC : 0); | 708 | REQ_SYNC : 0); |
709 | mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); | 709 | mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); |
710 | } | 710 | } |
711 | } | 711 | } |
@@ -726,7 +726,7 @@ int mpage_writepage(struct page *page, get_block_t get_block, | |||
726 | int ret = __mpage_writepage(page, wbc, &mpd); | 726 | int ret = __mpage_writepage(page, wbc, &mpd); |
727 | if (mpd.bio) { | 727 | if (mpd.bio) { |
728 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? | 728 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? |
729 | WRITE_SYNC : 0); | 729 | REQ_SYNC : 0); |
730 | mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); | 730 | mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); |
731 | } | 731 | } |
732 | return ret; | 732 | return ret; |
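mpage.c repeats one idiom three times, and the xfs hunks below write the same test as an if: integrity writeback (WB_SYNC_ALL) gets REQ_SYNC because the submitter will wait on the IO, while background writeback sends no modifier flags. Factored into a hypothetical helper for illustration; each call site above open-codes the ternary:

#include <linux/writeback.h>
#include <linux/blk_types.h>

static inline int wbc_op_flags_sketch(struct writeback_control *wbc)
{
	return wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0;
}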
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index c95d369e90aa..12eeae62a2b1 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -189,7 +189,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag) | |||
189 | set_buffer_dirty(nilfs->ns_sbh[0]); | 189 | set_buffer_dirty(nilfs->ns_sbh[0]); |
190 | if (nilfs_test_opt(nilfs, BARRIER)) { | 190 | if (nilfs_test_opt(nilfs, BARRIER)) { |
191 | err = __sync_dirty_buffer(nilfs->ns_sbh[0], | 191 | err = __sync_dirty_buffer(nilfs->ns_sbh[0], |
192 | WRITE_SYNC | WRITE_FLUSH_FUA); | 192 | REQ_SYNC | REQ_PREFLUSH | REQ_FUA); |
193 | } else { | 193 | } else { |
194 | err = sync_dirty_buffer(nilfs->ns_sbh[0]); | 194 | err = sync_dirty_buffer(nilfs->ns_sbh[0]); |
195 | } | 195 | } |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 636abcbd4650..52eef16edb01 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -627,7 +627,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg, | |||
627 | slot = o2nm_this_node(); | 627 | slot = o2nm_this_node(); |
628 | 628 | ||
629 | bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE, | 629 | bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE, |
630 | WRITE_SYNC); | 630 | REQ_SYNC); |
631 | if (IS_ERR(bio)) { | 631 | if (IS_ERR(bio)) { |
632 | status = PTR_ERR(bio); | 632 | status = PTR_ERR(bio); |
633 | mlog_errno(status); | 633 | mlog_errno(status); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index bc2dde2423c2..aa40c242f1db 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -1111,7 +1111,8 @@ static int flush_commit_list(struct super_block *s, | |||
1111 | mark_buffer_dirty(jl->j_commit_bh) ; | 1111 | mark_buffer_dirty(jl->j_commit_bh) ; |
1112 | depth = reiserfs_write_unlock_nested(s); | 1112 | depth = reiserfs_write_unlock_nested(s); |
1113 | if (reiserfs_barrier_flush(s)) | 1113 | if (reiserfs_barrier_flush(s)) |
1114 | __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA); | 1114 | __sync_dirty_buffer(jl->j_commit_bh, |
1115 | REQ_PREFLUSH | REQ_FUA); | ||
1115 | else | 1116 | else |
1116 | sync_dirty_buffer(jl->j_commit_bh); | 1117 | sync_dirty_buffer(jl->j_commit_bh); |
1117 | reiserfs_write_lock_nested(s, depth); | 1118 | reiserfs_write_lock_nested(s, depth); |
@@ -1269,7 +1270,8 @@ static int _update_journal_header_block(struct super_block *sb, | |||
1269 | depth = reiserfs_write_unlock_nested(sb); | 1270 | depth = reiserfs_write_unlock_nested(sb); |
1270 | 1271 | ||
1271 | if (reiserfs_barrier_flush(sb)) | 1272 | if (reiserfs_barrier_flush(sb)) |
1272 | __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA); | 1273 | __sync_dirty_buffer(journal->j_header_bh, |
1274 | REQ_PREFLUSH | REQ_FUA); | ||
1273 | else | 1275 | else |
1274 | sync_dirty_buffer(journal->j_header_bh); | 1276 | sync_dirty_buffer(journal->j_header_bh); |
1275 | 1277 | ||
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 3e57a56cf829..594e02c485b2 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -495,8 +495,10 @@ xfs_submit_ioend( | |||
495 | 495 | ||
496 | ioend->io_bio->bi_private = ioend; | 496 | ioend->io_bio->bi_private = ioend; |
497 | ioend->io_bio->bi_end_io = xfs_end_bio; | 497 | ioend->io_bio->bi_end_io = xfs_end_bio; |
498 | bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, | 498 | ioend->io_bio->bi_opf = REQ_OP_WRITE; |
499 | (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0); | 499 | if (wbc->sync_mode == WB_SYNC_ALL) |
500 | ioend->io_bio->bi_opf |= REQ_SYNC; | ||
501 | |||
500 | /* | 502 | /* |
501 | * If we are failing the IO now, just mark the ioend with an | 503 | * If we are failing the IO now, just mark the ioend with an |
502 | * error and finish it. This will run IO completion immediately | 504 | * error and finish it. This will run IO completion immediately |
@@ -567,8 +569,9 @@ xfs_chain_bio( | |||
567 | 569 | ||
568 | bio_chain(ioend->io_bio, new); | 570 | bio_chain(ioend->io_bio, new); |
569 | bio_get(ioend->io_bio); /* for xfs_destroy_ioend */ | 571 | bio_get(ioend->io_bio); /* for xfs_destroy_ioend */ |
570 | bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, | 572 | ioend->io_bio->bi_opf = REQ_OP_WRITE; |
571 | (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0); | 573 | if (wbc->sync_mode == WB_SYNC_ALL) |
574 | ioend->io_bio->bi_opf |= REQ_SYNC; | ||
572 | submit_bio(ioend->io_bio); | 575 | submit_bio(ioend->io_bio); |
573 | ioend->io_bio = new; | 576 | ioend->io_bio = new; |
574 | } | 577 | } |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index b5b9bffe3520..33c435f3316c 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1304,7 +1304,7 @@ _xfs_buf_ioapply( | |||
1304 | if (bp->b_flags & XBF_WRITE) { | 1304 | if (bp->b_flags & XBF_WRITE) { |
1305 | op = REQ_OP_WRITE; | 1305 | op = REQ_OP_WRITE; |
1306 | if (bp->b_flags & XBF_SYNCIO) | 1306 | if (bp->b_flags & XBF_SYNCIO) |
1307 | op_flags = WRITE_SYNC; | 1307 | op_flags = REQ_SYNC; |
1308 | if (bp->b_flags & XBF_FUA) | 1308 | if (bp->b_flags & XBF_FUA) |
1309 | op_flags |= REQ_FUA; | 1309 | op_flags |= REQ_FUA; |
1310 | if (bp->b_flags & XBF_FLUSH) | 1310 | if (bp->b_flags & XBF_FLUSH) |
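xfs_buf translates its own XBF_* buffer flags bit by bit. The hunk is cut off just before the body of the XBF_FLUSH branch; given the old WRITE_FLUSH mapping, that branch presumably ORs in REQ_PREFLUSH. A sketch of the write-side translation, noting that struct xfs_buf and the XBF_* flags live in xfs-internal headers (fs/xfs/xfs_buf.h):

#include <linux/blk_types.h>

static int xfs_buf_write_flags_sketch(struct xfs_buf *bp)
{
	int op_flags = 0;

	if (bp->b_flags & XBF_SYNCIO)
		op_flags = REQ_SYNC;		/* a waiter is expected */
	if (bp->b_flags & XBF_FUA)
		op_flags |= REQ_FUA;		/* durable on completion */
	if (bp->b_flags & XBF_FLUSH)
		op_flags |= REQ_PREFLUSH;	/* flush the cache first */
	return op_flags;
}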
diff --git a/include/linux/fs.h b/include/linux/fs.h index 46a74209917f..7a1b78ab7c15 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -151,58 +151,11 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
151 | */ | 151 | */ |
152 | #define CHECK_IOVEC_ONLY -1 | 152 | #define CHECK_IOVEC_ONLY -1 |
153 | 153 | ||
154 | /* | ||
155 | * The below are the various read and write flags that we support. Some of | ||
156 | * them include behavioral modifiers that send information down to the | ||
157 | * block layer and IO scheduler. They should be used along with a req_op. | ||
158 | * Terminology: | ||
159 | * | ||
160 | * The block layer uses device plugging to defer IO a little bit, in | ||
161 | * the hope that we will see more IO very shortly. This increases | ||
162 | * coalescing of adjacent IO and thus reduces the number of IOs we | ||
163 | * have to send to the device. It also allows for better queuing, | ||
164 | * if the IO isn't mergeable. If the caller is going to be waiting | ||
165 | * for the IO, then he must ensure that the device is unplugged so | ||
166 | * that the IO is dispatched to the driver. | ||
167 | * | ||
168 | * All IO is handled async in Linux. This is fine for background | ||
169 | * writes, but for reads or writes that someone waits for completion | ||
170 | * on, we want to notify the block layer and IO scheduler so that they | ||
171 | * know about it. That allows them to make better scheduling | ||
172 | * decisions. So when the below references 'sync' and 'async', it | ||
173 | * is referencing this priority hint. | ||
174 | * | ||
175 | * With that in mind, the available types are: | ||
176 | * | ||
177 | * READ A normal read operation. Device will be plugged. | ||
178 | * READ_SYNC A synchronous read. Device is not plugged, caller can | ||
179 | * immediately wait on this read without caring about | ||
180 | * unplugging. | ||
181 | * WRITE A normal async write. Device will be plugged. | ||
182 | * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down | ||
183 | * the hint that someone will be waiting on this IO | ||
184 | * shortly. The write equivalent of READ_SYNC. | ||
185 | * WRITE_ODIRECT Special case write for O_DIRECT only. | ||
186 | * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. | ||
187 | * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on | ||
188 | * non-volatile media on completion. | ||
189 | * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded | ||
190 | * by a cache flush and data is guaranteed to be on | ||
191 | * non-volatile media on completion. | ||
192 | * | ||
193 | */ | ||
194 | #define RW_MASK REQ_OP_WRITE | 154 | #define RW_MASK REQ_OP_WRITE |
195 | 155 | ||
196 | #define READ REQ_OP_READ | 156 | #define READ REQ_OP_READ |
197 | #define WRITE REQ_OP_WRITE | 157 | #define WRITE REQ_OP_WRITE |
198 | 158 | ||
199 | #define READ_SYNC 0 | ||
200 | #define WRITE_SYNC REQ_SYNC | ||
201 | #define WRITE_ODIRECT (REQ_SYNC | REQ_IDLE) | ||
202 | #define WRITE_FLUSH REQ_PREFLUSH | ||
203 | #define WRITE_FUA REQ_FUA | ||
204 | #define WRITE_FLUSH_FUA (REQ_PREFLUSH | REQ_FUA) | ||
205 | |||
206 | /* | 159 | /* |
207 | * Attribute flags. These should be or-ed together to figure out what | 160 | * Attribute flags. These should be or-ed together to figure out what |
208 | * has been changed! | 161 | * has been changed! |
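The block deleted from include/linux/fs.h above is the key for reading every other hunk in this patch: it is where the WRITE_* names were defined in terms of REQ_*. A minimal compile-time sanity sketch; the OLD_* macros are hypothetical stand-ins that reproduce the deleted definitions verbatim:

#include <linux/blk_types.h>
#include <linux/bug.h>

#define OLD_READ_SYNC		0
#define OLD_WRITE_SYNC		REQ_SYNC
#define OLD_WRITE_ODIRECT	(REQ_SYNC | REQ_IDLE)	/* O_DIRECT adds the idle hint */
#define OLD_WRITE_FLUSH		REQ_PREFLUSH
#define OLD_WRITE_FUA		REQ_FUA
#define OLD_WRITE_FLUSH_FUA	(REQ_PREFLUSH | REQ_FUA)

static inline void legacy_flag_mapping_checks(void)
{
	/* e.g. the nilfs2 barrier write above:
	 * WRITE_SYNC | WRITE_FLUSH_FUA == REQ_SYNC | REQ_PREFLUSH | REQ_FUA */
	BUILD_BUG_ON((OLD_WRITE_SYNC | OLD_WRITE_FLUSH_FUA) !=
		     (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
	BUILD_BUG_ON(OLD_WRITE_ODIRECT != (REQ_SYNC | REQ_IDLE));
}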
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index a9d34424450d..5da2c829a718 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h | |||
@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD); | |||
55 | { IPU, "IN-PLACE" }, \ | 55 | { IPU, "IN-PLACE" }, \ |
56 | { OPU, "OUT-OF-PLACE" }) | 56 | { OPU, "OUT-OF-PLACE" }) |
57 | 57 | ||
58 | #define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | WRITE_FLUSH_FUA)) | 58 | #define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA)) |
59 | #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) | 59 | #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) |
60 | 60 | ||
61 | #define show_bio_type(op_flags) show_bio_op_flags(op_flags), \ | 61 | #define show_bio_type(op_flags) show_bio_op_flags(op_flags), \ |
@@ -65,11 +65,9 @@ TRACE_DEFINE_ENUM(CP_DISCARD); | |||
65 | __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ | 65 | __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ |
66 | { 0, "WRITE" }, \ | 66 | { 0, "WRITE" }, \ |
67 | { REQ_RAHEAD, "READAHEAD" }, \ | 67 | { REQ_RAHEAD, "READAHEAD" }, \ |
68 | { READ_SYNC, "READ_SYNC" }, \ | 68 | { REQ_SYNC, "REQ_SYNC" }, \ |
69 | { WRITE_SYNC, "WRITE_SYNC" }, \ | 69 | { REQ_PREFLUSH, "REQ_PREFLUSH" }, \ |
70 | { WRITE_FLUSH, "WRITE_FLUSH" }, \ | 70 | { REQ_FUA, "REQ_FUA" }) |
71 | { WRITE_FUA, "WRITE_FUA" }, \ | ||
72 | { WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" }) | ||
73 | 71 | ||
74 | #define show_bio_extra(type) \ | 72 | #define show_bio_extra(type) \ |
75 | __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ | 73 | __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ |
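With the wrappers gone, the f2fs tracepoint masks and names raw request flags directly. Note that the __print_symbolic() table still lists REQ_SYNC even though F2FS_BIO_FLAG_MASK() clears that bit, so the entry can never match. A rough decoder equivalent with a hypothetical function name; the tracing core prints unmatched combinations as a hex value:

#include <linux/blk_types.h>

static const char *f2fs_flag_name_sketch(unsigned int op_flags)
{
	switch (op_flags & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA)) {
	case 0:			return "WRITE";
	case REQ_RAHEAD:	return "READAHEAD";
	case REQ_PREFLUSH:	return "REQ_PREFLUSH";
	case REQ_FUA:		return "REQ_FUA";
	default:		return "(combined bits: shown as hex)";
	}
}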
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index a3b1e617bcdc..32e0c232efba 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -307,7 +307,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
307 | { | 307 | { |
308 | int error; | 308 | int error; |
309 | 309 | ||
310 | hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block, | 310 | hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, |
311 | swsusp_header, NULL); | 311 | swsusp_header, NULL); |
312 | if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || | 312 | if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || |
313 | !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { | 313 | !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { |
@@ -317,7 +317,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
317 | swsusp_header->flags = flags; | 317 | swsusp_header->flags = flags; |
318 | if (flags & SF_CRC32_MODE) | 318 | if (flags & SF_CRC32_MODE) |
319 | swsusp_header->crc32 = handle->crc32; | 319 | swsusp_header->crc32 = handle->crc32; |
320 | error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, | 320 | error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, |
321 | swsusp_resume_block, swsusp_header, NULL); | 321 | swsusp_resume_block, swsusp_header, NULL); |
322 | } else { | 322 | } else { |
323 | printk(KERN_ERR "PM: Swap header not found!\n"); | 323 | printk(KERN_ERR "PM: Swap header not found!\n"); |
@@ -397,7 +397,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) | |||
397 | } else { | 397 | } else { |
398 | src = buf; | 398 | src = buf; |
399 | } | 399 | } |
400 | return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb); | 400 | return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb); |
401 | } | 401 | } |
402 | 402 | ||
403 | static void release_swap_writer(struct swap_map_handle *handle) | 403 | static void release_swap_writer(struct swap_map_handle *handle) |
@@ -1000,8 +1000,7 @@ static int get_swap_reader(struct swap_map_handle *handle, | |||
1000 | return -ENOMEM; | 1000 | return -ENOMEM; |
1001 | } | 1001 | } |
1002 | 1002 | ||
1003 | error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, | 1003 | error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL); |
1004 | tmp->map, NULL); | ||
1005 | if (error) { | 1004 | if (error) { |
1006 | release_swap_reader(handle); | 1005 | release_swap_reader(handle); |
1007 | return error; | 1006 | return error; |
@@ -1025,7 +1024,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, | |||
1025 | offset = handle->cur->entries[handle->k]; | 1024 | offset = handle->cur->entries[handle->k]; |
1026 | if (!offset) | 1025 | if (!offset) |
1027 | return -EFAULT; | 1026 | return -EFAULT; |
1028 | error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb); | 1027 | error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb); |
1029 | if (error) | 1028 | if (error) |
1030 | return error; | 1029 | return error; |
1031 | if (++handle->k >= MAP_PAGE_ENTRIES) { | 1030 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
@@ -1534,7 +1533,7 @@ int swsusp_check(void) | |||
1534 | if (!IS_ERR(hib_resume_bdev)) { | 1533 | if (!IS_ERR(hib_resume_bdev)) { |
1535 | set_blocksize(hib_resume_bdev, PAGE_SIZE); | 1534 | set_blocksize(hib_resume_bdev, PAGE_SIZE); |
1536 | clear_page(swsusp_header); | 1535 | clear_page(swsusp_header); |
1537 | error = hib_submit_io(REQ_OP_READ, READ_SYNC, | 1536 | error = hib_submit_io(REQ_OP_READ, 0, |
1538 | swsusp_resume_block, | 1537 | swsusp_resume_block, |
1539 | swsusp_header, NULL); | 1538 | swsusp_header, NULL); |
1540 | if (error) | 1539 | if (error) |
@@ -1543,7 +1542,7 @@ int swsusp_check(void) | |||
1543 | if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { | 1542 | if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { |
1544 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); | 1543 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
1545 | /* Reset swap signature now */ | 1544 | /* Reset swap signature now */ |
1546 | error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, | 1545 | error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, |
1547 | swsusp_resume_block, | 1546 | swsusp_resume_block, |
1548 | swsusp_header, NULL); | 1547 | swsusp_header, NULL); |
1549 | } else { | 1548 | } else { |
@@ -1588,11 +1587,11 @@ int swsusp_unmark(void) | |||
1588 | { | 1587 | { |
1589 | int error; | 1588 | int error; |
1590 | 1589 | ||
1591 | hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block, | 1590 | hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, |
1592 | swsusp_header, NULL); | 1591 | swsusp_header, NULL); |
1593 | if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { | 1592 | if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { |
1594 | memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); | 1593 | memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); |
1595 | error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, | 1594 | error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, |
1596 | swsusp_resume_block, | 1595 | swsusp_resume_block, |
1597 | swsusp_header, NULL); | 1596 | swsusp_header, NULL); |
1598 | } else { | 1597 | } else { |