| author | David Woodhouse <David.Woodhouse@intel.com> | 2008-08-09 11:42:20 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-10-09 02:56:02 -0400 |
| commit | e17fc0a1ccf88f6d4dcb363729f3141b0958c325 (patch) | |
| tree | 0a7c2dc1c3159c2af14d87c67ca83e158b2c78b5 /block/blk-core.c | |
| parent | d30a2605be9d5132d95944916e8f578fcfe4f976 (diff) | |
Allow elevators to sort/merge discard requests
But blkdev_issue_discard() still emits requests which are interpreted as
soft barriers, because naïve callers might otherwise issue subsequent
writes to those same sectors, which might cross on the queue (if they're
reallocated quickly enough).
Callers still _can_ issue non-barrier discard requests, but they have to
take care of queue ordering for themselves.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
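For illustration, a caller-side sketch of the two options the message describes. This is not part of the patch: it assumes the 2.6.28-era bio interface (submit_bio(rw, bio), two-argument bi_end_io) and the DISCARD_BARRIER/DISCARD_NOBARRIER request flags that accompany this series; error handling is elided.

```c
/*
 * Illustrative only -- not from this commit. Assumes the 2.6.28-era
 * bio API and the DISCARD_BARRIER/DISCARD_NOBARRIER flags from this
 * series; error handling elided for brevity.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/fs.h>

static void discard_end_io(struct bio *bio, int err)
{
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/* Option 1: barrier discard -- the queue keeps later writes behind it. */
static void discard_sectors_barrier(struct block_device *bdev,
				    sector_t sector, unsigned int nr_sects)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio->bi_sector = sector;
	bio->bi_size = nr_sects << 9;
	bio->bi_bdev = bdev;
	bio->bi_end_io = discard_end_io;
	submit_bio(DISCARD_BARRIER, bio);	/* ordering left to the queue */
}

/*
 * Option 2: plain discard -- the caller must order its own I/O, e.g.
 * by waiting for completion before rewriting the same sectors.
 */
static void discard_sectors_and_wait(struct block_device *bdev,
				     sector_t sector, unsigned int nr_sects)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio->bi_sector = sector;
	bio->bi_size = nr_sects << 9;
	bio->bi_bdev = bdev;
	bio->bi_end_io = discard_end_io;
	bio->bi_private = &done;
	submit_bio(DISCARD_NOBARRIER, bio);
	wait_for_completion(&done);		/* caller-enforced ordering */
}
```

blkdev_issue_discard() itself issues the barrier form, which is what keeps naïve callers safe by default.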
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 12
1 files changed, 7 insertions, 5 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 1e143c4f9d34..1261516dd42a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1077,12 +1077,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
-	if (unlikely(bio_barrier(bio)))
-		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 	if (unlikely(bio_discard(bio))) {
-		req->cmd_flags |= (REQ_SOFTBARRIER | REQ_DISCARD);
+		req->cmd_flags |= REQ_DISCARD;
+		if (bio_barrier(bio))
+			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
-	}
+	} else if (unlikely(bio_barrier(bio)))
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
@@ -1114,7 +1115,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	blk_queue_bounce(q, &bio);
 
 	barrier = bio_barrier(bio);
-	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+	if (unlikely(barrier) && bio_has_data(bio) &&
+	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
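The new cmd_flags decision in the first hunk has three outcomes. As a sanity check, here is a small userspace rendering that mirrors only the branch structure of init_request_from_bio(); the flag values are stand-ins, not the kernel's real REQ_* bits:

```c
#include <stdio.h>

/* Stand-in flag bits; the kernel's REQ_* values differ. */
enum {
	REQ_DISCARD     = 1 << 0,
	REQ_SOFTBARRIER = 1 << 1,
	REQ_HARDBARRIER = 1 << 2,
	REQ_NOMERGE     = 1 << 3,
};

/* Mirrors the post-patch branch structure of init_request_from_bio(). */
static unsigned int classify(int is_discard, int is_barrier)
{
	unsigned int cmd_flags = 0;

	if (is_discard) {
		cmd_flags |= REQ_DISCARD;	/* no REQ_NOMERGE any more */
		if (is_barrier)
			cmd_flags |= REQ_SOFTBARRIER;
	} else if (is_barrier) {
		cmd_flags |= REQ_HARDBARRIER | REQ_NOMERGE;
	}
	return cmd_flags;
}

int main(void)
{
	printf("plain discard:   %#x\n", classify(1, 0)); /* REQ_DISCARD only */
	printf("barrier discard: %#x\n", classify(1, 1)); /* + REQ_SOFTBARRIER */
	printf("barrier write:   %#x\n", classify(0, 1)); /* HARD + NOMERGE */
	return 0;
}
```

The notable change is that a discard request no longer carries REQ_NOMERGE, which is precisely what lets the elevator sort and merge it like any other request.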