diff options
author    Mike Christie <mchristi@redhat.com>  2016-06-05 15:32:25 -0400
committer Jens Axboe <axboe@fb.com>            2016-06-07 15:41:38 -0400
commit    28a8f0d317bf225ff15008f5dd66ae16242dd843 (patch)
tree      4ed24aee241907a3612a61f8cc634acd10989c21 /block
parent    a418090aa88b9b531ac1f504d6bb8c0e9b04ccb7 (diff)
block, drivers, fs: rename REQ_FLUSH to REQ_PREFLUSH
To avoid confusion between REQ_OP_FLUSH, which is handled by
request_fn drivers, and upper layers requesting the block layer
perform a flush sequence along with possibly a WRITE, this patch
renames REQ_FLUSH to REQ_PREFLUSH.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 12 | ||||
-rw-r--r-- | block/blk-flush.c | 16 | ||||
-rw-r--r-- | block/blk-mq.c | 4 |
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c7d66c23a708..32a283eb7274 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio) | |||
1029 | * Flush requests do not use the elevator so skip initialization. | 1029 | * Flush requests do not use the elevator so skip initialization. |
1030 | * This allows a request to share the flush and elevator data. | 1030 | * This allows a request to share the flush and elevator data. |
1031 | */ | 1031 | */ |
1032 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) | 1032 | if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) |
1033 | return false; | 1033 | return false; |
1034 | 1034 | ||
1035 | return true; | 1035 | return true; |
@@ -1736,7 +1736,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) | |||
1736 | return BLK_QC_T_NONE; | 1736 | return BLK_QC_T_NONE; |
1737 | } | 1737 | } |
1738 | 1738 | ||
1739 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { | 1739 | if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) { |
1740 | spin_lock_irq(q->queue_lock); | 1740 | spin_lock_irq(q->queue_lock); |
1741 | where = ELEVATOR_INSERT_FLUSH; | 1741 | where = ELEVATOR_INSERT_FLUSH; |
1742 | goto get_rq; | 1742 | goto get_rq; |
@@ -1968,9 +1968,9 @@ generic_make_request_checks(struct bio *bio) | |||
1968 | * drivers without flush support don't have to worry | 1968 | * drivers without flush support don't have to worry |
1969 | * about them. | 1969 | * about them. |
1970 | */ | 1970 | */ |
1971 | if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && | 1971 | if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) && |
1972 | !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { | 1972 | !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { |
1973 | bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); | 1973 | bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA); |
1974 | if (!nr_sectors) { | 1974 | if (!nr_sectors) { |
1975 | err = 0; | 1975 | err = 0; |
1976 | goto end_io; | 1976 | goto end_io; |
@@ -2217,7 +2217,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | |||
2217 | */ | 2217 | */ |
2218 | BUG_ON(blk_queued_rq(rq)); | 2218 | BUG_ON(blk_queued_rq(rq)); |
2219 | 2219 | ||
2220 | if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) | 2220 | if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA)) |
2221 | where = ELEVATOR_INSERT_FLUSH; | 2221 | where = ELEVATOR_INSERT_FLUSH; |
2222 | 2222 | ||
2223 | add_acct_request(q, rq, where); | 2223 | add_acct_request(q, rq, where); |
@@ -3311,7 +3311,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
3311 | /* | 3311 | /* |
3312 | * rq is already accounted, so use raw insert | 3312 | * rq is already accounted, so use raw insert |
3313 | */ | 3313 | */ |
3314 | if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) | 3314 | if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA)) |
3315 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); | 3315 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); |
3316 | else | 3316 | else |
3317 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); | 3317 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); |
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 21f0d5b0d2ca..d308def812db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -10,8 +10,8 @@ | |||
10 | * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request | 10 | * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request |
11 | * properties and hardware capability. | 11 | * properties and hardware capability. |
12 | * | 12 | * |
13 | * If a request doesn't have data, only REQ_FLUSH makes sense, which | 13 | * If a request doesn't have data, only REQ_PREFLUSH makes sense, which |
14 | * indicates a simple flush request. If there is data, REQ_FLUSH indicates | 14 | * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates |
15 | * that the device cache should be flushed before the data is executed, and | 15 | * that the device cache should be flushed before the data is executed, and |
16 | * REQ_FUA means that the data must be on non-volatile media on request | 16 | * REQ_FUA means that the data must be on non-volatile media on request |
17 | * completion. | 17 | * completion. |
@@ -20,11 +20,11 @@ | |||
20 | * difference. The requests are either completed immediately if there's no | 20 | * difference. The requests are either completed immediately if there's no |
21 | * data or executed as normal requests otherwise. | 21 | * data or executed as normal requests otherwise. |
22 | * | 22 | * |
23 | * If the device has writeback cache and supports FUA, REQ_FLUSH is | 23 | * If the device has writeback cache and supports FUA, REQ_PREFLUSH is |
24 | * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. | 24 | * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. |
25 | * | 25 | * |
26 | * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is | 26 | * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH |
27 | * translated to PREFLUSH and REQ_FUA to POSTFLUSH. | 27 | * is translated to PREFLUSH and REQ_FUA to POSTFLUSH. |
28 | * | 28 | * |
29 | * The actual execution of flush is double buffered. Whenever a request | 29 | * The actual execution of flush is double buffered. Whenever a request |
30 | * needs to execute PRE or POSTFLUSH, it queues at | 30 | * needs to execute PRE or POSTFLUSH, it queues at |
@@ -103,7 +103,7 @@ static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) | |||
103 | policy |= REQ_FSEQ_DATA; | 103 | policy |= REQ_FSEQ_DATA; |
104 | 104 | ||
105 | if (fflags & (1UL << QUEUE_FLAG_WC)) { | 105 | if (fflags & (1UL << QUEUE_FLAG_WC)) { |
106 | if (rq->cmd_flags & REQ_FLUSH) | 106 | if (rq->cmd_flags & REQ_PREFLUSH) |
107 | policy |= REQ_FSEQ_PREFLUSH; | 107 | policy |= REQ_FSEQ_PREFLUSH; |
108 | if (!(fflags & (1UL << QUEUE_FLAG_FUA)) && | 108 | if (!(fflags & (1UL << QUEUE_FLAG_FUA)) && |
109 | (rq->cmd_flags & REQ_FUA)) | 109 | (rq->cmd_flags & REQ_FUA)) |
@@ -391,9 +391,9 @@ void blk_insert_flush(struct request *rq) | |||
391 | 391 | ||
392 | /* | 392 | /* |
393 | * @policy now records what operations need to be done. Adjust | 393 | * @policy now records what operations need to be done. Adjust |
394 | * REQ_FLUSH and FUA for the driver. | 394 | * REQ_PREFLUSH and FUA for the driver. |
395 | */ | 395 | */ |
396 | rq->cmd_flags &= ~REQ_FLUSH; | 396 | rq->cmd_flags &= ~REQ_PREFLUSH; |
397 | if (!(fflags & (1UL << QUEUE_FLAG_FUA))) | 397 | if (!(fflags & (1UL << QUEUE_FLAG_FUA))) |
398 | rq->cmd_flags &= ~REQ_FUA; | 398 | rq->cmd_flags &= ~REQ_FUA; |
399 | 399 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29bcd9c07a34..13f460368759 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1247,7 +1247,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie) | |||
1247 | static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | 1247 | static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) |
1248 | { | 1248 | { |
1249 | const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw); | 1249 | const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw); |
1250 | const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); | 1250 | const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); |
1251 | struct blk_map_ctx data; | 1251 | struct blk_map_ctx data; |
1252 | struct request *rq; | 1252 | struct request *rq; |
1253 | unsigned int request_count = 0; | 1253 | unsigned int request_count = 0; |
@@ -1344,7 +1344,7 @@ done: | |||
1344 | static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | 1344 | static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) |
1345 | { | 1345 | { |
1346 | const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw); | 1346 | const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw); |
1347 | const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); | 1347 | const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); |
1348 | struct blk_plug *plug; | 1348 | struct blk_plug *plug; |
1349 | unsigned int request_count = 0; | 1349 | unsigned int request_count = 0; |
1350 | struct blk_map_ctx data; | 1350 | struct blk_map_ctx data; |