author     Mike Christie <mchristi@redhat.com>   2016-06-05 15:32:25 -0400
committer  Jens Axboe <axboe@fb.com>             2016-06-07 15:41:38 -0400
commit     28a8f0d317bf225ff15008f5dd66ae16242dd843 (patch)
tree       4ed24aee241907a3612a61f8cc634acd10989c21
parent     a418090aa88b9b531ac1f504d6bb8c0e9b04ccb7 (diff)
block, drivers, fs: rename REQ_FLUSH to REQ_PREFLUSH
To avoid confusion between REQ_OP_FLUSH, which is handled by request_fn
drivers, and upper layers requesting the block layer perform a flush
sequence along with possibly a WRITE, this patch renames REQ_FLUSH to
REQ_PREFLUSH.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
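For orientation, a minimal sketch of the distinction the message draws
(illustrative only, not part of this patch; it assumes the 4.7-era
bi_rw, req_op() and bio_op() helpers used throughout this series, and
both helper names are hypothetical):

        /* Illustrative only -- not part of this patch. */
        static bool upper_layer_wants_flush(struct bio *bio)
        {
                /* bio-level intent from the filesystem: flush the volatile
                 * cache before this I/O; formerly spelled REQ_FLUSH. */
                return bio->bi_rw & REQ_PREFLUSH;
        }

        static bool driver_sees_flush(struct request *rq)
        {
                /* what a request_fn driver actually receives: an empty
                 * request generated by the block layer's flush machinery. */
                return req_op(rq) == REQ_OP_FLUSH;
        }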
-rw-r--r--  Documentation/block/writeback_cache_control.txt  22
-rw-r--r--  Documentation/device-mapper/log-writes.txt  10
-rw-r--r--  block/blk-core.c  12
-rw-r--r--  block/blk-flush.c  16
-rw-r--r--  block/blk-mq.c  4
-rw-r--r--  drivers/block/drbd/drbd_actlog.c  4
-rw-r--r--  drivers/block/drbd/drbd_main.c  2
-rw-r--r--  drivers/block/drbd/drbd_protocol.h  2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  2
-rw-r--r--  drivers/block/drbd/drbd_req.c  2
-rw-r--r--  drivers/md/bcache/journal.c  2
-rw-r--r--  drivers/md/bcache/request.c  8
-rw-r--r--  drivers/md/dm-cache-target.c  12
-rw-r--r--  drivers/md/dm-crypt.c  7
-rw-r--r--  drivers/md/dm-era-target.c  4
-rw-r--r--  drivers/md/dm-io.c  2
-rw-r--r--  drivers/md/dm-log-writes.c  2
-rw-r--r--  drivers/md/dm-raid1.c  5
-rw-r--r--  drivers/md/dm-region-hash.c  4
-rw-r--r--  drivers/md/dm-snap.c  6
-rw-r--r--  drivers/md/dm-stripe.c  2
-rw-r--r--  drivers/md/dm-thin.c  8
-rw-r--r--  drivers/md/dm.c  12
-rw-r--r--  drivers/md/linear.c  2
-rw-r--r--  drivers/md/md.c  2
-rw-r--r--  drivers/md/md.h  2
-rw-r--r--  drivers/md/multipath.c  2
-rw-r--r--  drivers/md/raid0.c  2
-rw-r--r--  drivers/md/raid1.c  3
-rw-r--r--  drivers/md/raid10.c  2
-rw-r--r--  drivers/md/raid5-cache.c  2
-rw-r--r--  drivers/md/raid5.c  2
-rw-r--r--  fs/btrfs/check-integrity.c  8
-rw-r--r--  fs/jbd2/journal.c  2
-rw-r--r--  fs/xfs/xfs_buf.c  2
-rw-r--r--  include/linux/blk_types.h  8
-rw-r--r--  include/linux/fs.h  4
-rw-r--r--  include/trace/events/f2fs.h  2
-rw-r--r--  kernel/trace/blktrace.c  5
39 files changed, 102 insertions, 98 deletions
diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
index da70bdacd503..8a6bdada5f6b 100644
--- a/Documentation/block/writeback_cache_control.txt
+++ b/Documentation/block/writeback_cache_control.txt
@@ -20,11 +20,11 @@ a forced cache flush, and the Force Unit Access (FUA) flag for requests.
 Explicit cache flushes
 ----------------------
 
-The REQ_FLUSH flag can be OR ed into the r/w flags of a bio submitted from
+The REQ_PREFLUSH flag can be OR ed into the r/w flags of a bio submitted from
 the filesystem and will make sure the volatile cache of the storage device
 has been flushed before the actual I/O operation is started.  This explicitly
 guarantees that previously completed write requests are on non-volatile
-storage before the flagged bio starts. In addition the REQ_FLUSH flag can be
+storage before the flagged bio starts. In addition the REQ_PREFLUSH flag can be
 set on an otherwise empty bio structure, which causes only an explicit cache
 flush without any dependent I/O.  It is recommend to use
 the blkdev_issue_flush() helper for a pure cache flush.
@@ -41,21 +41,21 @@ signaled after the data has been committed to non-volatile storage.
 Implementation details for filesystems
 --------------------------------------
 
-Filesystems can simply set the REQ_FLUSH and REQ_FUA bits and do not have to
+Filesystems can simply set the REQ_PREFLUSH and REQ_FUA bits and do not have to
 worry if the underlying devices need any explicit cache flushing and how
-the Forced Unit Access is implemented. The REQ_FLUSH and REQ_FUA flags
+the Forced Unit Access is implemented. The REQ_PREFLUSH and REQ_FUA flags
 may both be set on a single bio.
 
 
 Implementation details for make_request_fn based block drivers
 --------------------------------------------------------------
 
-These drivers will always see the REQ_FLUSH and REQ_FUA bits as they sit
+These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
 directly below the submit_bio interface. For remapping drivers the REQ_FUA
 bits need to be propagated to underlying devices, and a global flush needs
-to be implemented for bios with the REQ_FLUSH bit set. For real device
-drivers that do not have a volatile cache the REQ_FLUSH and REQ_FUA bits
-on non-empty bios can simply be ignored, and REQ_FLUSH requests without
+to be implemented for bios with the REQ_PREFLUSH bit set. For real device
+drivers that do not have a volatile cache the REQ_PREFLUSH and REQ_FUA bits
+on non-empty bios can simply be ignored, and REQ_PREFLUSH requests without
 data can be completed successfully without doing any work. Drivers for
 devices with volatile caches need to implement the support for these
 flags themselves without any help from the block layer.
@@ -65,8 +65,8 @@ Implementation details for request_fn based block drivers
 --------------------------------------------------------------
 
 For devices that do not support volatile write caches there is no driver
-support required, the block layer completes empty REQ_FLUSH requests before
-entering the driver and strips off the REQ_FLUSH and REQ_FUA bits from
+support required, the block layer completes empty REQ_PREFLUSH requests before
+entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from
 requests that have a payload. For devices with volatile write caches the
 driver needs to tell the block layer that it supports flushing caches by
 doing:
@@ -74,7 +74,7 @@ doing:
 	blk_queue_write_cache(sdkp->disk->queue, true, false);
 
 and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn.  Note that
-REQ_FLUSH requests with a payload are automatically turned into a sequence
+REQ_PREFLUSH requests with a payload are automatically turned into a sequence
 of an empty REQ_OP_FLUSH request followed by the actual write by the block
 layer. For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
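As a concrete illustration of the two filesystem-side idioms the document
above describes, a minimal sketch under the post-rename API (function names
are hypothetical; blkdev_issue_flush() and bio_set_op_attrs() are the
helpers used in this series, and the three-argument blkdev_issue_flush()
signature is the 4.7-era one):

        /* Pure cache flush with no dependent I/O: use the helper. */
        static int flush_volatile_cache(struct block_device *bdev)
        {
                return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
        }

        /* Flush-then-write with FUA: set both flags on a data bio and
         * let the block layer decompose it as the device requires. */
        static void submit_durable_write(struct bio *bio)
        {
                bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
                submit_bio(bio);
        }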
diff --git a/Documentation/device-mapper/log-writes.txt b/Documentation/device-mapper/log-writes.txt
index c10f30c9b534..f4ebcbaf50f3 100644
--- a/Documentation/device-mapper/log-writes.txt
+++ b/Documentation/device-mapper/log-writes.txt
@@ -14,14 +14,14 @@ Log Ordering
 
 We log things in order of completion once we are sure the write is no longer in
 cache. This means that normal WRITE requests are not actually logged until the
-next REQ_FLUSH request. This is to make it easier for userspace to replay the
-log in a way that correlates to what is on disk and not what is in cache, to
-make it easier to detect improper waiting/flushing.
+next REQ_PREFLUSH request. This is to make it easier for userspace to replay
+the log in a way that correlates to what is on disk and not what is in cache,
+to make it easier to detect improper waiting/flushing.
 
 This works by attaching all WRITE requests to a list once the write completes.
-Once we see a REQ_FLUSH request we splice this list onto the request and once
+Once we see a REQ_PREFLUSH request we splice this list onto the request and once
 the FLUSH request completes we log all of the WRITEs and then the FLUSH.  Only
-completed WRITEs, at the time the REQ_FLUSH is issued, are added in order to
+completed WRITEs, at the time the REQ_PREFLUSH is issued, are added in order to
 simulate the worst case scenario with regard to power failures. Consider the
 following example (W means write, C means complete):
 
diff --git a/block/blk-core.c b/block/blk-core.c
index c7d66c23a708..32a283eb7274 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
 		return false;
 
 	return true;
@@ -1736,7 +1736,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1968,9 +1968,9 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
 			goto end_io;
@@ -2217,7 +2217,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
@@ -3311,7 +3311,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 21f0d5b0d2ca..d308def812db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -10,8 +10,8 @@
  * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
  * properties and hardware capability.
  *
- * If a request doesn't have data, only REQ_FLUSH makes sense, which
- * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
+ * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
+ * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
  * that the device cache should be flushed before the data is executed, and
  * REQ_FUA means that the data must be on non-volatile media on request
  * completion.
@@ -20,11 +20,11 @@
  * difference.  The requests are either completed immediately if there's no
  * data or executed as normal requests otherwise.
  *
- * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
  * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
  *
- * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
- * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
+ * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
@@ -103,7 +103,7 @@ static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 		policy |= REQ_FSEQ_DATA;
 
 	if (fflags & (1UL << QUEUE_FLAG_WC)) {
-		if (rq->cmd_flags & REQ_FLUSH)
+		if (rq->cmd_flags & REQ_PREFLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
 		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
 		    (rq->cmd_flags & REQ_FUA))
@@ -391,9 +391,9 @@ void blk_insert_flush(struct request *rq)
 
 	/*
 	 * @policy now records what operations need to be done.  Adjust
-	 * REQ_FLUSH and FUA for the driver.
+	 * REQ_PREFLUSH and FUA for the driver.
 	 */
-	rq->cmd_flags &= ~REQ_FLUSH;
+	rq->cmd_flags &= ~REQ_PREFLUSH;
 	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
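The translation rules in the blk-flush.c header comment reduce to a small
policy table; a standalone restatement under stated assumptions (it mirrors
blk_flush_policy() above rather than replacing it, and the function name is
hypothetical):

        /* Which flush-sequence steps a request needs, given the queue's
         * writeback-cache (wc) and FUA capabilities.  Illustrative only. */
        static unsigned int flush_steps(bool wc, bool fua_capable,
                                        struct request *rq)
        {
                unsigned int policy = 0;

                if (blk_rq_sectors(rq))
                        policy |= REQ_FSEQ_DATA;                /* DATA step */
                if (wc) {
                        if (rq->cmd_flags & REQ_PREFLUSH)
                                policy |= REQ_FSEQ_PREFLUSH;    /* flush first */
                        if ((rq->cmd_flags & REQ_FUA) && !fua_capable)
                                policy |= REQ_FSEQ_POSTFLUSH;   /* emulate FUA */
                }
                return policy;
        }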
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29bcd9c07a34..13f460368759 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1247,7 +1247,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
 	unsigned int request_count = 0;
@@ -1344,7 +1344,7 @@ done:
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index f236a31cc095..d524973f94b3 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -148,7 +148,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 	device->md_io.error = -ENODEV;
 
 	if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
-		op_flags |= REQ_FUA | REQ_FLUSH;
+		op_flags |= REQ_FUA | REQ_PREFLUSH;
 	op_flags |= REQ_SYNC | REQ_NOIDLE;
 
 	bio = bio_alloc_drbd(GFP_NOIO);
@@ -847,7 +847,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
 	unsigned long count = 0;
 	sector_t esector, nr_sectors;
 
-	/* This would be an empty REQ_FLUSH, be silent. */
+	/* This would be an empty REQ_PREFLUSH, be silent. */
 	if ((mode == SET_OUT_OF_SYNC) && size == 0)
 		return 0;
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index d55febcaa414..2b37744db0fa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1609,7 +1609,7 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
 	if (connection->agreed_pro_version >= 95)
 		return  (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
 			(bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
-			(bio->bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
+			(bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
 			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
 	else
 		return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index ef9245363dcc..129f8c76c9b1 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -112,7 +112,7 @@ struct p_header100 {
 #define DP_MAY_SET_IN_SYNC    4
 #define DP_UNPLUG             8 /* not used anymore   */
 #define DP_FUA               16 /* equals REQ_FUA     */
-#define DP_FLUSH             32 /* equals REQ_FLUSH   */
+#define DP_FLUSH             32 /* equals REQ_PREFLUSH   */
 #define DP_DISCARD           64 /* equals REQ_DISCARD */
 #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
 #define DP_SEND_WRITE_ACK   256 /* This is a proto C write request */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6c5997894475..1ee002352ea2 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2158,7 +2158,7 @@ static unsigned long wire_flags_to_bio_flags(u32 dpf)
 {
 	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
 		(dpf & DP_FUA ? REQ_FUA : 0) |
-		(dpf & DP_FLUSH ? REQ_FLUSH : 0);
+		(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
 }
 
 static unsigned long wire_flags_to_bio_op(u32 dpf)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 2255dcfebd2b..eef6e9575b4e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1132,7 +1132,7 @@ static int drbd_process_write_request(struct drbd_request *req)
 	 * replicating, in which case there is no point. */
 	if (unlikely(req->i.size == 0)) {
 		/* The only size==0 bios we expect are empty flushes. */
-		D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
+		D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
 		if (remote)
 			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
 		return remote;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index a3c3b309ff4a..6925023e12d4 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -631,7 +631,7 @@ static void journal_write_unlocked(struct closure *cl)
 		bio->bi_end_io	= journal_write_endio;
 		bio->bi_private = w;
 		bio_set_op_attrs(bio, REQ_OP_WRITE,
-				 REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA);
+				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
 		bch_bio_map(bio, w->data);
 
 		trace_bcache_journal_write(bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 016b0aa7199c..69f16f43f8ab 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl)
 		return bch_data_invalidate(cl);
 
 	/*
-	 * Journal writes are marked REQ_FLUSH; if the original write was a
+	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
 	 */
-	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+	bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
 
 	do {
 		unsigned i;
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio	= 0;
 	s->iop.error		= 0;
 	s->iop.flags		= 0;
-	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
 	s->iop.wq		= bcache_wq;
 
 	return s;
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
 
-		if (bio->bi_rw & REQ_FLUSH) {
+		if (bio->bi_rw & REQ_PREFLUSH) {
 			/* Also need to send a flush to the backing device */
 			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 540e80eb317d..718744db62df 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio &&
-	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH)) &&
+	    !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 }
 
 /*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
 static bool discard_or_flush(struct bio *bio)
 {
 	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	       bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1614,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 		remap_to_cache(cache, bio, 0);
 
 	/*
-	 * REQ_FLUSH is not directed at any particular block so we don't
-	 * need to inc_ds().  REQ_FUA's are split into a write + REQ_FLUSH
+	 * REQ_PREFLUSH is not directed at any particular block so we don't
+	 * need to inc_ds().  REQ_FUA's are split into a write + REQ_PREFLUSH
 	 * by dm-core.
 	 */
 	issue(cache, bio);
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
 
 		bio = bio_list_pop(&bios);
 
-		if (bio->bi_rw & REQ_FLUSH)
+		if (bio->bi_rw & REQ_PREFLUSH)
 			process_flush_bio(cache, bio);
 		else if (bio_op(bio) == REQ_OP_DISCARD)
 			process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 057d19b5e196..96dd5d7e454a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	struct crypt_config *cc = ti->private;
 
 	/*
-	 * If bio is REQ_FLUSH or REQ_OP_DISCARD, just bypass crypt queues.
-	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
+	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
 	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
 	 */
-	if (unlikely(bio->bi_rw & REQ_FLUSH || bio_op(bio) == REQ_OP_DISCARD)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+	    bio_op(bio) == REQ_OP_DISCARD)) {
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
 			bio->bi_iter.bi_sector = cc->start +
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 665bf3285618..2faf49d8f4d7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio)
 	remap_to_origin(era, bio);
 
 	/*
-	 * REQ_FLUSH bios carry no data, so we're not interested in them.
+	 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
 	 */
-	if (!(bio->bi_rw & REQ_FLUSH) &&
+	if (!(bio->bi_rw & REQ_PREFLUSH) &&
 	    (bio_data_dir(bio) == WRITE) &&
 	    !metadata_current_marked(era->md, block)) {
 		defer_bio(era, bio);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 22e0597d631e..0e225fd4a8d1 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -380,7 +380,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (op_flags & REQ_FLUSH))
+		if (where[i].count || (op_flags & REQ_PREFLUSH))
 			do_region(op, op_flags, i, where + i, dp, io);
 	}
 
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 0edb8ea51e46..b5dbf7a0515e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -555,7 +555,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 	struct bio_vec bv;
 	size_t alloc_size;
 	int i = 0;
-	bool flush_bio = (bio->bi_rw & REQ_FLUSH);
+	bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
 	bool fua_bio = (bio->bi_rw & REQ_FUA);
 	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 907df2ba8fd4..9f5f460c0e92 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if ((bio->bi_rw & REQ_FLUSH) ||
+		if ((bio->bi_rw & REQ_PREFLUSH) ||
 		    (bio_op(bio) == REQ_OP_DISCARD)) {
 			bio_list_add(&sync, bio);
 			continue;
@@ -1253,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		if (!(bio->bi_rw & REQ_FLUSH) && bio_op(bio) != REQ_OP_DISCARD)
+		if (!(bio->bi_rw & REQ_PREFLUSH) &&
+		    bio_op(bio) != REQ_OP_DISCARD)
 			dm_rh_dec(ms->rh, bio_record->write_region);
 		return error;
 	}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 3550ca7c6577..b11813431f31 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		rh->flush_failure = 1;
 		return;
 	}
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 	struct bio *bio;
 
 	for (bio = bios->head; bio; bio = bio->bi_next) {
-		if (bio->bi_rw & REQ_FLUSH || bio_op(bio) == REQ_OP_DISCARD)
+		if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
 			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
 	}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 70bb0e8b62ce..69ab1ff5f5c9 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 	init_tracked_chunk(bio);
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		bio->bi_bdev = s->cow->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
@@ -1799,7 +1799,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
 	init_tracked_chunk(bio);
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		if (!dm_bio_get_target_bio_nr(bio))
 			bio->bi_bdev = s->origin->bdev;
 		else
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_bdev = o->dev->bdev;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH))
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
 
 	if (bio_rw(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b738178ca068..48f1c01d7b9f 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	uint32_t stripe;
 	unsigned target_bio_nr;
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
 		BUG_ON(target_bio_nr >= sc->stripes);
 		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1b684cbb9ba2..5f9e3d799d66 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -697,7 +697,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
 		dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -868,7 +868,7 @@ static void __inc_remap_and_issue_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+		if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
 		    bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
@@ -1641,7 +1641,7 @@ static void __remap_and_issue_shared_cell(void *context,
 
 	while ((bio = bio_list_pop(&cell->bios))) {
 		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+		    (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
 		     bio_op(bio) == REQ_OP_DISCARD))
 			bio_list_add(&info->defer_bios, bio);
 		else {
@@ -2556,7 +2556,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
 	    bio_op(bio) == REQ_OP_DISCARD) {
 		thin_defer_bio_with_throttle(tc, bio);
 		return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fcc68c8edba0..aba7ed9abb3a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1003,12 +1003,12 @@ static void dec_pending(struct dm_io *io, int error)
 		if (io_error == DM_ENDIO_REQUEUE)
 			return;
 
-		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
+		if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
 			/*
 			 * Preflush done for flush with data, reissue
-			 * without REQ_FLUSH.
+			 * without REQ_PREFLUSH.
 			 */
-			bio->bi_rw &= ~REQ_FLUSH;
+			bio->bi_rw &= ~REQ_PREFLUSH;
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
@@ -1477,7 +1477,7 @@ EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 /*
  * A target may call dm_accept_partial_bio only from the map routine.  It is
- * allowed for all bio types except REQ_FLUSH.
+ * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
@@ -1507,7 +1507,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-	BUG_ON(bio->bi_rw & REQ_FLUSH);
+	BUG_ON(bio->bi_rw & REQ_PREFLUSH);
 	BUG_ON(bi_size > *tio->len_ptr);
 	BUG_ON(n_sectors > bi_size);
 	*tio->len_ptr -= bi_size - n_sectors;
@@ -1795,7 +1795,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 
 	start_io_acct(ci.io);
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		ci.bio = &ci.md->flush_bio;
 		ci.sector_count = 0;
 		error = __send_empty_flush(&ci);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 1ad3f485672c..70ff888d25d0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 		struct bio *split;
 		sector_t start_sector, end_sector, data_offset;
 
-		if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+		if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 			md_flush_request(mddev, bio);
 			return;
 		}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bd4844fe0e98..1f123f5a29da 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio);
 	else {
-		bio->bi_rw &= ~REQ_FLUSH;
+		bio->bi_rw &= ~REQ_PREFLUSH;
 		mddev->pers->make_request(mddev, bio);
 	}
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2e0918fc376d..b4f335245bd6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -424,7 +424,7 @@ struct mddev {
 
 	/* Generic flush handling.
 	 * The last to finish preflush schedules a worker to submit
-	 * the rest of the request (without the REQ_FLUSH flag).
+	 * the rest of the request (without the REQ_PREFLUSH flag).
 	 */
 	struct bio *flush_bio;
 	atomic_t flush_pending;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index dd483bb2e111..72ea98e89e57 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 051a10ca4e09..c3d439083212 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	struct md_rdev *tmp_dev;
 	struct bio *split;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a3427230f7cf..10e53cd6a995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1056,7 +1056,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 	const int op = bio_op(bio);
 	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+	const unsigned long do_flush_fua = (bio->bi_rw &
+						(REQ_PREFLUSH | REQ_FUA));
 	const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 615045a11bac..245640b50153 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1447,7 +1447,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
 	struct bio *split;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 92651381b094..5504ce2bac06 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
 		bio_endio(bio);
 		return 0;
 	}
-	bio->bi_rw &= ~REQ_FLUSH;
+	bio->bi_rw &= ~REQ_PREFLUSH;
 	return -EAGAIN;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b9122e2c6aa1..7aacf5b55e15 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	DEFINE_WAIT(w);
 	bool do_prepare;
 
-	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
 		int ret = r5l_handle_flush_request(conf->log, bi);
 
 		if (ret == 0)
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index da944ffddbaf..ca70bc78b27d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2207,7 +2207,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
 			       block->dev_bytenr, block->mirror_num);
 		next_block = block->next_in_same_bio;
 		block->iodone_w_error = iodone_w_error;
-		if (block->submit_bio_bh_rw & REQ_FLUSH) {
+		if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
 			dev_state->last_flush_gen++;
 			if ((dev_state->state->print_mask &
 			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2243,7 +2243,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
 		       block->dev_bytenr, block->mirror_num);
 
 	block->iodone_w_error = iodone_w_error;
-	if (block->submit_bio_bh_rw & REQ_FLUSH) {
+	if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
 		dev_state->last_flush_gen++;
 		if ((dev_state->state->print_mask &
 		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2884,7 +2884,7 @@ int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
 		btrfsic_process_written_block(dev_state, dev_bytenr,
 					      &bh->b_data, 1, NULL,
 					      NULL, bh, op_flags);
-	} else if (NULL != dev_state && (op_flags & REQ_FLUSH)) {
+	} else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			printk(KERN_INFO
@@ -2982,7 +2982,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
 			kunmap(bio->bi_io_vec[i].bv_page);
 		}
 		kfree(mapped_datav);
-	} else if (NULL != dev_state && (bio->bi_rw & REQ_FLUSH)) {
+	} else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			printk(KERN_INFO
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 34bc99637d5a..dc68f562f681 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1354,7 +1354,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
 
 	trace_jbd2_write_superblock(journal, write_flags);
 	if (!(journal->j_flags & JBD2_BARRIER))
-		write_flags &= ~(REQ_FUA | REQ_FLUSH);
+		write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
 	lock_buffer(bh);
 	if (buffer_write_io_error(bh)) {
 		/*
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d8acd3716dbd..686d8f160f5c 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1237,7 +1237,7 @@ _xfs_buf_ioapply(
 		if (bp->b_flags & XBF_FUA)
 			op_flags |= REQ_FUA;
 		if (bp->b_flags & XBF_FLUSH)
-			op_flags |= REQ_FLUSH;
+			op_flags |= REQ_PREFLUSH;
 
 		/*
 		 * Run the write verifier callback function if it exists. If
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 32d87522f349..562ab8301217 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -168,7 +168,7 @@ enum rq_flag_bits {
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
 	__REQ_FUA,		/* forced unit access */
-	__REQ_FLUSH,		/* request for cache flush */
+	__REQ_PREFLUSH,		/* request for cache flush */
 
 	/* bio only flags */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
@@ -212,12 +212,12 @@ enum rq_flag_bits {
 	 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
-	 REQ_FLUSH | REQ_FUA | REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
+	 REQ_PREFLUSH | REQ_FUA | REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 /* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
-	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
 
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)
@@ -235,7 +235,7 @@ enum rq_flag_bits {
 #define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
 #define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
 #define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
-#define REQ_FLUSH		(1ULL << __REQ_FLUSH)
+#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
 #define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
 #define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ccd166477487..183024525d40 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -204,9 +204,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define READ_SYNC		REQ_SYNC
 #define WRITE_SYNC		(REQ_SYNC | REQ_NOIDLE)
 #define WRITE_ODIRECT		REQ_SYNC
-#define WRITE_FLUSH		(REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
+#define WRITE_FLUSH		(REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
 #define WRITE_FUA		(REQ_SYNC | REQ_NOIDLE | REQ_FUA)
-#define WRITE_FLUSH_FUA		(REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+#define WRITE_FLUSH_FUA		(REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
 
 /*
  * Attribute flags.  These should be or-ed together to figure out what
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 30efa44a473c..878963a1f058 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -33,7 +33,7 @@ TRACE_DEFINE_ENUM(SSR);
 TRACE_DEFINE_ENUM(__REQ_RAHEAD);
 TRACE_DEFINE_ENUM(__REQ_SYNC);
 TRACE_DEFINE_ENUM(__REQ_NOIDLE);
-TRACE_DEFINE_ENUM(__REQ_FLUSH);
+TRACE_DEFINE_ENUM(__REQ_PREFLUSH);
 TRACE_DEFINE_ENUM(__REQ_FUA);
 TRACE_DEFINE_ENUM(__REQ_PRIO);
 TRACE_DEFINE_ENUM(__REQ_META);
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 0c70fbb6ea8d..03b0dd98ff0e 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -189,6 +189,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
+#define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 
 /* The ilog2() calls fall out because they're constant */
 #define MASK_TC_BIT(rw, __name)	((rw & REQ_ ## __name) << \
@@ -219,7 +220,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	what |= MASK_TC_BIT(op_flags, SYNC);
 	what |= MASK_TC_BIT(op_flags, RAHEAD);
 	what |= MASK_TC_BIT(op_flags, META);
-	what |= MASK_TC_BIT(op_flags, FLUSH);
+	what |= MASK_TC_BIT(op_flags, PREFLUSH);
 	what |= MASK_TC_BIT(op_flags, FUA);
 	if (op == REQ_OP_DISCARD)
 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
@@ -1779,7 +1780,7 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
 {
 	int i = 0;
 
-	if (rw & REQ_FLUSH)
+	if (rw & REQ_PREFLUSH)
 		rwbs[i++] = 'F';
 
 	switch (op) {
1785 switch (op) { 1786 switch (op) {