author    Tejun Heo <tj@kernel.org>            2008-11-27 23:32:03 -0500
committer Jens Axboe <jens.axboe@oracle.com>   2008-12-29 02:28:44 -0500
commit    a7384677b2f4cd40948fd7ce024ba5e1821444ba
tree      679af09dde4d2659bc6a7dcd1d3dc34752899fa6 /block
parent    313e42999dbc0f234ca5909a236f78f082cb43b1
block: remove duplicate or unused barrier/discard error paths
* Because barrier mode can be changed dynamically, whether a barrier
  is supported can be determined only when the barrier is actually
  issued, so there is no point in checking it earlier.  Drop the
  barrier support checks in generic_make_request() and
  __make_request(), and update the comment around the support check
  in blk_do_ordered().

* There is no reason to check discard support in both
  generic_make_request() and __make_request().  Drop the check in
  __make_request().  While at it, move the error handling block to
  the end of the function and add unlikely() to the q existence test.

* A barrier request, be it empty or not, is never passed to the low
  level driver, and thus it is meaningless to try to copy req->sector
  back to bio->bi_sector on error.  In addition, the notion of a
  failed sector makes no sense for an empty barrier to begin with.
  Drop the code block from __end_that_request_first().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
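As a rough illustration of the first point, here is a minimal sketch
(not the verbatim kernel code) of what the issue-time decision amounts
to: q->next_ordered is inspected only when the barrier request is
actually started, so a queue that switched to QUEUE_ORDERED_NONE after
the bio was submitted still fails cleanly with -EOPNOTSUPP.  The
helper name queue_supports_ordering() is hypothetical; in the real
code the check sits inline in blk_do_ordered(), as the blk-barrier.c
hunk below shows.

static bool queue_supports_ordering(struct request_queue *q,
				    struct request *rq)
{
	/* q->next_ordered may change at any time, so checking it
	 * earlier than issue time proves nothing. */
	if (q->next_ordered != QUEUE_ORDERED_NONE)
		return true;

	/* Queue ordering not supported: terminate the request. */
	elv_dequeue_request(q, rq);
	if (__blk_end_request(rq, -EOPNOTSUPP, blk_rq_bytes(rq)))
		BUG();
	return false;
}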
Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c |  4
-rw-r--r--  block/blk-core.c    | 44
2 files changed, 13 insertions(+), 35 deletions(-)
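To make the second point concrete, the control flow that
__generic_make_request() is left with after this patch looks
schematically like the following (a simplified sketch with most sanity
checks omitted, not the full function); every early failure now
funnels into the single end_io exit:

static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	int ret, err = -EIO;

	do {
		q = bdev_get_queue(bio->bi_bdev);
		if (unlikely(!q))
			goto end_io;	/* nonexistent block device */

		/* discard support is now checked here, and only here */
		if (bio_discard(bio) && !q->prepare_discard_fn) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		ret = q->make_request_fn(q, bio);
	} while (ret);

	return;

end_io:
	bio_endio(bio, err);
}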
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 1d7adc72c95d..43d479a1e664 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -216,8 +216,8 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 		return 1;
 	} else {
 		/*
-		 * This can happen when the queue switches to
-		 * ORDERED_NONE while this request is on it.
+		 * Queue ordering not supported.  Terminate
+		 * with prejudice.
 		 */
 		elv_dequeue_request(q, rq);
 		if (__blk_end_request(rq, -EOPNOTSUPP,
diff --git a/block/blk-core.c b/block/blk-core.c
index 2fdcd0cff57f..b1fd4f5f07d3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1139,7 +1139,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors, barrier, discard, err;
+	int el_ret, nr_sectors;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	int rw_flags;
@@ -1153,22 +1153,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
-	barrier = bio_barrier(bio);
-	if (unlikely(barrier) && bio_has_data(bio) &&
-	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
-	discard = bio_discard(bio);
-	if (unlikely(discard) && !q->prepare_discard_fn) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(barrier) || elv_queue_empty(q))
+	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1262,10 +1249,6 @@ out:
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
-
-end_io:
-	bio_endio(bio, err);
-	return 0;
 }
 
 /*
@@ -1418,15 +1401,13 @@ static inline void __generic_make_request(struct bio *bio)
 		char b[BDEVNAME_SIZE];
 
 		q = bdev_get_queue(bio->bi_bdev);
-		if (!q) {
+		if (unlikely(!q)) {
 			printk(KERN_ERR
 			       "generic_make_request: Trying to access "
 			       "nonexistent block-device %s (%Lu)\n",
 			       bdevname(bio->bi_bdev, b),
 			       (long long) bio->bi_sector);
-end_io:
-			bio_endio(bio, err);
-			break;
+			goto end_io;
 		}
 
 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1463,14 +1444,19 @@ end_io:
 
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
-		if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
-		    (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+		if (bio_discard(bio) && !q->prepare_discard_fn) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
+
+	return;
+
+end_io:
+	bio_endio(bio, err);
 }
 
 /*
@@ -1720,14 +1706,6 @@ static int __end_that_request_first(struct request *req, int error,
 	while ((bio = req->bio) != NULL) {
 		int nbytes;
 
-		/*
-		 * For an empty barrier request, the low level driver must
-		 * store a potential error location in ->sector.  We pass
-		 * that back up in ->bi_sector.
-		 */
-		if (blk_empty_barrier(req))
-			bio->bi_sector = req->sector;
-
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;