author | Mike Christie <mchristi@redhat.com> | 2016-06-05 15:32:24 -0400
committer | Jens Axboe <axboe@fb.com> | 2016-06-07 15:41:38 -0400
commit | a418090aa88b9b531ac1f504d6bb8c0e9b04ccb7 (patch)
tree | 716d67920a87f973f6ed3bd2acdefec9ef781fa8
parent | 3a5e02ced11e22ecd9da3d6710afe15bcfee1d10 (diff)
block: do not use REQ_FLUSH for tracking flush support
The last patch added REQ_OP_FLUSH for request_fn drivers, and the next patch renames REQ_FLUSH to REQ_PREFLUSH, which will be used by filesystems and make_request_fn drivers so they can send a write/flush combo.

This patch drops xen-blkfront's use of REQ_FLUSH to track whether it supports REQ_OP_FLUSH requests, so that REQ_FLUSH can be deleted.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Juergen Gross <kernel@pfupf.net>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | drivers/block/xen-blkfront.c | 47
1 file changed, 24 insertions, 23 deletions
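For readers skimming the diff below, here is a minimal standalone sketch of the new feature-tracking scheme: two independent flags instead of a mask of REQ_* bits. The struct, the enum values, and the choose_ring_op() helper are simplified stand-ins for illustration only (the numeric operation values are placeholders, not the Xen ABI); only the field names and the BLKIF_OP_* choices mirror the patch.

```c
#include <stdio.h>

/* Simplified stand-ins for the blkif ring operations used in the patch;
 * the numeric values are arbitrary placeholders, not the Xen ABI. */
enum blkif_op {
	BLKIF_OP_NONE = 0,            /* plain write, no cache control   */
	BLKIF_OP_FLUSH_DISKCACHE = 1, /* backend supports a cache flush  */
	BLKIF_OP_WRITE_BARRIER = 2,   /* backend supports barriers (FUA) */
};

/* After the patch, flush and FUA support are tracked separately instead
 * of being packed into feature_flush as REQ_FLUSH|REQ_FUA. */
struct blkfront_features {
	unsigned int feature_flush;
	unsigned int feature_fua;
};

/* Mirrors the if/else chain that replaces the old switch statement in
 * blkif_queue_rw_req(). */
static enum blkif_op choose_ring_op(const struct blkfront_features *f)
{
	if (f->feature_flush && f->feature_fua)
		return BLKIF_OP_WRITE_BARRIER;
	else if (f->feature_flush)
		return BLKIF_OP_FLUSH_DISKCACHE;
	else
		return BLKIF_OP_NONE;
}

int main(void)
{
	struct blkfront_features barrier_backend = { .feature_flush = 1, .feature_fua = 1 };
	struct blkfront_features flush_backend   = { .feature_flush = 1, .feature_fua = 0 };
	struct blkfront_features plain_backend   = { 0 };

	printf("barrier backend -> op %d\n", choose_ring_op(&barrier_backend));
	printf("flush backend   -> op %d\n", choose_ring_op(&flush_backend));
	printf("plain backend   -> op %d\n", choose_ring_op(&plain_backend));
	return 0;
}
```

Two plain flags keep the driver's bookkeeping independent of the block layer's REQ_* bit values, which is what lets the follow-up patch rename REQ_FLUSH without touching xen-blkfront again.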
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3aeb25bd5057..343ef7abe5fd 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -196,6 +196,7 @@ struct blkfront_info
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
 	unsigned int feature_flush;
+	unsigned int feature_fua;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
 	unsigned int discard_granularity;
@@ -763,19 +764,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 			 * implement it the same way. (It's also a FLUSH+FUA,
 			 * since it is guaranteed ordered WRT previous writes.)
 			 */
-			switch (info->feature_flush &
-				((REQ_FLUSH|REQ_FUA))) {
-			case REQ_FLUSH|REQ_FUA:
+			if (info->feature_flush && info->feature_fua)
 				ring_req->operation =
 					BLKIF_OP_WRITE_BARRIER;
-				break;
-			case REQ_FLUSH:
+			else if (info->feature_flush)
 				ring_req->operation =
 					BLKIF_OP_FLUSH_DISKCACHE;
-				break;
-			default:
+			else
 				ring_req->operation = 0;
-			}
 		}
 		ring_req->u.rw.nr_segments = num_grant;
 		if (unlikely(require_extra_req)) {
@@ -866,9 +862,9 @@ static inline bool blkif_request_flush_invalid(struct request *req,
 {
 	return ((req->cmd_type != REQ_TYPE_FS) ||
 		((req_op(req) == REQ_OP_FLUSH) &&
-		 !(info->feature_flush & REQ_FLUSH)) ||
+		 !info->feature_flush) ||
 		((req->cmd_flags & REQ_FUA) &&
-		 !(info->feature_flush & REQ_FUA)));
+		 !info->feature_fua));
 }
 
 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -985,24 +981,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	return 0;
 }
 
-static const char *flush_info(unsigned int feature_flush)
+static const char *flush_info(struct blkfront_info *info)
 {
-	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
-	case REQ_FLUSH|REQ_FUA:
+	if (info->feature_flush && info->feature_fua)
 		return "barrier: enabled;";
-	case REQ_FLUSH:
+	else if (info->feature_flush)
 		return "flush diskcache: enabled;";
-	default:
+	else
 		return "barrier or flush: disabled;";
-	}
 }
 
 static void xlvbd_flush(struct blkfront_info *info)
 {
-	blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
-			      info->feature_flush & REQ_FUA);
+	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+			      info->feature_fua ? true : false);
 	pr_info("blkfront: %s: %s %s %s %s %s\n",
-		info->gd->disk_name, flush_info(info->feature_flush),
+		info->gd->disk_name, flush_info(info),
 		"persistent grants:", info->feature_persistent ?
 		"enabled;" : "disabled;", "indirect descriptors:",
 		info->max_indirect_segments ? "enabled;" : "disabled;");
@@ -1621,6 +1615,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			if (unlikely(error)) {
 				if (error == -EOPNOTSUPP)
 					error = 0;
+				info->feature_fua = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
@@ -2315,6 +2310,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	unsigned int indirect_segments;
 
 	info->feature_flush = 0;
+	info->feature_fua = 0;
 
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
 			    "feature-barrier", "%d", &barrier,
@@ -2327,8 +2323,11 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	 *
 	 * If there are barriers, then we use flush.
 	 */
-	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH | REQ_FUA;
+	if (!err && barrier) {
+		info->feature_flush = 1;
+		info->feature_fua = 1;
+	}
+
 	/*
 	 * And if there is "feature-flush-cache" use that above
 	 * barriers.
@@ -2337,8 +2336,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 			    "feature-flush-cache", "%d", &flush,
 			    NULL);
 
-	if (!err && flush)
-		info->feature_flush = REQ_FLUSH;
+	if (!err && flush) {
+		info->feature_flush = 1;
+		info->feature_fua = 0;
+	}
 
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
 			    "feature-discard", "%d", &discard,
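As a closing note on the feature negotiation in the last two hunks: "feature-flush-cache" is checked after "feature-barrier", so a backend that advertises both ends up with feature_flush = 1 and feature_fua = 0. The sketch below models just that precedence with plain integers; gather_flush_features() and its two parameters are illustrative stand-ins for the xenbus_gather() calls and their error handling, not kernel APIs.

```c
#include <stdio.h>

struct flush_features {
	unsigned int feature_flush;
	unsigned int feature_fua;
};

/*
 * Models the ordering in blkfront_gather_backend_features():
 * "feature-barrier" enables flush+FUA via BLKIF_OP_WRITE_BARRIER,
 * but "feature-flush-cache", checked afterwards, downgrades that to
 * a plain cache flush (feature_fua = 0).
 */
static struct flush_features gather_flush_features(int barrier, int flush_cache)
{
	struct flush_features f = { .feature_flush = 0, .feature_fua = 0 };

	if (barrier) {
		f.feature_flush = 1;
		f.feature_fua = 1;
	}

	if (flush_cache) {
		f.feature_flush = 1;
		f.feature_fua = 0;
	}

	return f;
}

int main(void)
{
	/* backend advertises both: flush-cache wins, FUA is dropped */
	struct flush_features f = gather_flush_features(1, 1);

	printf("feature_flush=%u feature_fua=%u\n", f.feature_flush, f.feature_fua);
	return 0;
}
```

In the driver itself the two flags then feed blk_queue_write_cache() as plain booleans, as the xlvbd_flush() hunk above shows.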