author	Tejun Heo <tj@kernel.org>	2010-09-03 05:56:16 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:35:36 -0400
commit	6958f145459ca7ad9715024de97445addacb8510 (patch)
tree	c8c945eb68ceb88bd34647d7bcaedd13a0d753ca
parent	589d7ed02ade0d06a3510da2e15a7edfdb2ef3d8 (diff)
block: kill QUEUE_ORDERED_BY_TAG
Nobody is making meaningful use of ORDERED_BY_TAG now and queue
draining for barrier requests will be removed soon which will render
the advantage of tag ordering moot.  Kill ORDERED_BY_TAG.  The
following users are affected.

* brd: converted to ORDERED_DRAIN.
* virtio_blk: ORDERED_TAG path was already marked deprecated.  Removed.
* xen-blkfront: ORDERED_TAG case dropped.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r--  block/blk-barrier.c           35
-rw-r--r--  drivers/block/brd.c            2
-rw-r--r--  drivers/block/virtio_blk.c     9
-rw-r--r--  drivers/block/xen-blkfront.c   8
-rw-r--r--  drivers/scsi/sd.c              4
-rw-r--r--  include/linux/blkdev.h        17
6 files changed, 13 insertions(+), 62 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f0faefca032f..c807e9ca3a68 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,10 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 	if (ordered != QUEUE_ORDERED_NONE &&
 	    ordered != QUEUE_ORDERED_DRAIN &&
 	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
-	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
-	    ordered != QUEUE_ORDERED_TAG &&
-	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
-	    ordered != QUEUE_ORDERED_TAG_FUA) {
+	    ordered != QUEUE_ORDERED_DRAIN_FUA) {
 		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
 		return -EINVAL;
 	}
@@ -155,21 +152,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	 * For an empty barrier, there's no actual BAR request, which
 	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
 	 */
-	if (!blk_rq_sectors(rq)) {
+	if (!blk_rq_sectors(rq))
 		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
 				QUEUE_ORDERED_DO_POSTFLUSH);
-		/*
-		 * Empty barrier on a write-through device w/ ordered
-		 * tag has no command to issue and without any command
-		 * to issue, ordering by tag can't be used.  Drain
-		 * instead.
-		 */
-		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
-		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
-			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
-			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
-		}
-	}
 
 	/* stash away the original request */
 	blk_dequeue_request(rq);
@@ -210,7 +195,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	} else
 		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
+	if (queue_in_flight(q))
 		rq = NULL;
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
@@ -257,16 +242,10 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 		return true;
 
-	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
-		/* Ordered by tag.  Blocking the next barrier is enough. */
-		if (is_barrier && rq != &q->bar_rq)
-			*rqp = NULL;
-	} else {
-		/* Ordered by draining.  Wait for turn. */
-		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
-		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
-			*rqp = NULL;
-	}
+	/* Ordered by draining.  Wait for turn. */
+	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+		*rqp = NULL;
 
 	return true;
 }
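With the tag branch removed, blk_do_ordered() is left with a single
ordering strategy: a request may be dispatched only once the barrier
sequence has advanced to its stage.  A minimal userspace model of that
"wait for turn" check follows; may_dispatch() and the stage values are
illustrative stand-ins for the kernel's QUEUE_ORDSEQ_* machinery, not
the real definitions.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the barrier sequence stages, in issue order. */
enum ordseq {
	ORDSEQ_PREFLUSH  = 1,	/* pre-flush in flight */
	ORDSEQ_BAR       = 2,	/* the barrier write itself */
	ORDSEQ_POSTFLUSH = 4,	/* post-flush in flight */
	ORDSEQ_DONE      = 8,
};

/*
 * Models the drain path kept above: a request belonging to a later
 * stage than the queue's current stage must wait (the kernel NULLs
 * *rqp); a request at the current stage proceeds.  An earlier stage
 * would trip the WARN_ON().
 */
static int may_dispatch(enum ordseq cur_seq, enum ordseq req_seq)
{
	assert(req_seq >= cur_seq);	/* models WARN_ON(req < cur) */
	return req_seq == cur_seq;	/* req > cur  =>  hold back */
}

int main(void)
{
	/* Pre-flush still in flight: the barrier itself must wait. */
	printf("bar during preflush: %d\n",
	       may_dispatch(ORDSEQ_PREFLUSH, ORDSEQ_BAR));	/* 0 */
	/* Sequence has advanced: the barrier may go. */
	printf("bar at its turn:     %d\n",
	       may_dispatch(ORDSEQ_BAR, ORDSEQ_BAR));		/* 1 */
	return 0;
}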
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 1c7f63792ff8..47a41272d26b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -482,7 +482,7 @@ static struct brd_device *brd_alloc(int i)
 	if (!brd->brd_queue)
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
-	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
+	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_DRAIN);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2aafafca2b13..79652809eee8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -395,15 +395,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * to implement write barrier support.
 		 */
 		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
-	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
-		/*
-		 * If the BARRIER feature is supported the host expects us
-		 * to order request by tags.  This implies there is not
-		 * volatile write cache on the host, and that the host
-		 * never re-orders outstanding I/O.  This feature is not
-		 * useful for real life scenarious and deprecated.
-		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
 	} else {
 		/*
 		 * If the FLUSH feature is not supported we must assume that
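With the deprecated BARRIER branch gone, virtblk_probe() distinguishes
only hosts with and without the FLUSH feature.  A compressed sketch of
the remaining choice; virtblk_ordered_mode() and has_flush are
illustrative stand-ins (has_flush models virtio_has_feature(vdev,
VIRTIO_BLK_F_FLUSH)), and the fallback mode, truncated in the hunk
above, is assumed here to be plain draining.

/* Values per the include/linux/blkdev.h hunks at the end of this patch. */
enum {
	QUEUE_ORDERED_DO_PREFLUSH  = 0x10,
	QUEUE_ORDERED_DO_BAR       = 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH = 0x40,

	QUEUE_ORDERED_DRAIN       = QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
				    QUEUE_ORDERED_DO_PREFLUSH |
				    QUEUE_ORDERED_DO_POSTFLUSH,
};

static unsigned virtblk_ordered_mode(int has_flush)
{
	if (has_flush)
		/* Host advertises a flushable volatile write cache. */
		return QUEUE_ORDERED_DRAIN_FLUSH;
	/* No FLUSH feature: fallback branch is cut off in the hunk
	 * above; plain draining is assumed here. */
	return QUEUE_ORDERED_DRAIN;
}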
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ac1b682edecb..50ec6f834996 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -424,8 +424,7 @@ static int xlvbd_barrier(struct blkfront_info *info)
 	const char *barrier;
 
 	switch (info->feature_barrier) {
-	case QUEUE_ORDERED_DRAIN:	barrier = "enabled (drain)"; break;
-	case QUEUE_ORDERED_TAG:		barrier = "enabled (tag)"; break;
+	case QUEUE_ORDERED_DRAIN:	barrier = "enabled"; break;
 	case QUEUE_ORDERED_NONE:	barrier = "disabled"; break;
 	default:			return -EINVAL;
 	}
@@ -1078,8 +1077,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	 * we're dealing with a very old backend which writes
 	 * synchronously; draining will do what needs to get done.
 	 *
-	 * If there are barriers, then we can do full queued writes
-	 * with tagged barriers.
+	 * If there are barriers, then we use flush.
 	 *
 	 * If barriers are not supported, then there's no much we can
 	 * do, so just set ordering to NONE.
@@ -1087,7 +1085,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err)
 		info->feature_barrier = QUEUE_ORDERED_DRAIN;
 	else if (barrier)
-		info->feature_barrier = QUEUE_ORDERED_TAG;
+		info->feature_barrier = QUEUE_ORDERED_DRAIN_FLUSH;
 	else
 		info->feature_barrier = QUEUE_ORDERED_NONE;
 
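Taken together with the xlvbd_barrier() hunk above, blkfront's
negotiation now lands on one of three drain-based outcomes.  A small
self-contained sketch of that mapping; blkfront_ordered_mode() is a
hypothetical helper, and err/barrier model the result of reading the
backend's "feature-barrier" key.

#include <stdio.h>

/* Values per the include/linux/blkdev.h hunks at the end of this patch. */
enum {
	QUEUE_ORDERED_NONE         = 0x00,
	QUEUE_ORDERED_DO_PREFLUSH  = 0x10,
	QUEUE_ORDERED_DO_BAR       = 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
	QUEUE_ORDERED_DRAIN        = QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH  = QUEUE_ORDERED_DRAIN |
				     QUEUE_ORDERED_DO_PREFLUSH |
				     QUEUE_ORDERED_DO_POSTFLUSH,
};

/* err != 0: "feature-barrier" unreadable, i.e. a very old synchronous
 * backend; barrier != 0: backend advertises barrier support. */
static unsigned blkfront_ordered_mode(int err, int barrier)
{
	if (err)
		return QUEUE_ORDERED_DRAIN;       /* draining suffices */
	if (barrier)
		return QUEUE_ORDERED_DRAIN_FLUSH; /* was QUEUE_ORDERED_TAG */
	return QUEUE_ORDERED_NONE;                /* nothing we can do */
}

int main(void)
{
	printf("old backend: 0x%02x\n", blkfront_ordered_mode(1, 0)); /* 0x20 */
	printf("barriers:    0x%02x\n", blkfront_ordered_mode(0, 1)); /* 0x70 */
	printf("no barriers: 0x%02x\n", blkfront_ordered_mode(0, 0)); /* 0x00 */
	return 0;
}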
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2714becc2eaf..cdfc51ab9cf2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2151,9 +2151,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
 	/*
 	 * We now have all cache related info, determine how we deal
-	 * with ordered requests.  Note that as the current SCSI
-	 * dispatch function can alter request order, we cannot use
-	 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
+	 * with ordered requests.
 	 */
 	if (sdkp->WCE)
 		ordered = sdkp->DPOFUA
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 015375c7d031..7077bc0d6138 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -470,12 +470,7 @@ enum {
 	 * DRAIN	: ordering by draining is enough
 	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
 	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
-	 * TAG		: ordering by tag is enough
-	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
-	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
 	 */
-	QUEUE_ORDERED_BY_DRAIN		= 0x01,
-	QUEUE_ORDERED_BY_TAG		= 0x02,
 	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
 	QUEUE_ORDERED_DO_BAR		= 0x20,
 	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
@@ -483,8 +478,7 @@ enum {
 
 	QUEUE_ORDERED_NONE	= 0x00,
 
-	QUEUE_ORDERED_DRAIN	= QUEUE_ORDERED_BY_DRAIN |
-				  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_DRAIN	= QUEUE_ORDERED_DO_BAR,
 	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
 				    QUEUE_ORDERED_DO_PREFLUSH |
 				    QUEUE_ORDERED_DO_POSTFLUSH,
@@ -492,15 +486,6 @@ enum {
 				    QUEUE_ORDERED_DO_PREFLUSH |
 				    QUEUE_ORDERED_DO_FUA,
 
-	QUEUE_ORDERED_TAG	= QUEUE_ORDERED_BY_TAG |
-				  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
-				    QUEUE_ORDERED_DO_PREFLUSH |
-				    QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
-				    QUEUE_ORDERED_DO_PREFLUSH |
-				    QUEUE_ORDERED_DO_FUA,
-
 	/*
 	 * Ordered operation sequence
 	 */
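For reference, the surviving composite modes decompose into the DO_*
action bits alone, while the removed TAG variants were the same
compositions plus the now-dead BY_TAG bit.  A checkable sketch;
QUEUE_ORDERED_DO_FUA's value is not visible in the hunks above, and
0x80 is an assumption here.

#include <stdio.h>

enum {
	QUEUE_ORDERED_DO_PREFLUSH  = 0x10,
	QUEUE_ORDERED_DO_BAR       = 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
	QUEUE_ORDERED_DO_FUA       = 0x80,	/* assumed; not shown above */
};

int main(void)
{
	/* DRAIN is just the barrier write; DRAIN_FLUSH adds both
	 * flushes; DRAIN_FUA swaps the post-flush for a forced-unit-
	 * access barrier write. */
	unsigned drain       = QUEUE_ORDERED_DO_BAR;
	unsigned drain_flush = drain | QUEUE_ORDERED_DO_PREFLUSH |
			       QUEUE_ORDERED_DO_POSTFLUSH;
	unsigned drain_fua   = drain | QUEUE_ORDERED_DO_PREFLUSH |
			       QUEUE_ORDERED_DO_FUA;

	printf("DRAIN=0x%02x FLUSH=0x%02x FUA=0x%02x\n",
	       drain, drain_flush, drain_fua);	/* 0x20 0x70 0xb0 */
	return 0;
}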