path: root/block
author	Christoph Hellwig <hch@infradead.org>	2011-09-12 06:12:01 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-09-12 06:12:01 -0400
commit	5a7bbad27a410350e64a2d7f5ec18fc73836c14f (patch)
tree	3447cd62dbcbd77b4071e2eb7576f1d7632ef2d3 /block
parent	c20e8de27fef9f59869c81c288ad6cf28200e00c (diff)
block: remove support for bio remapping from ->make_request
There is very little benefit in letting a ->make_request instance update the bio's device and sector and loop around it in __generic_make_request when we can achieve the same by calling generic_make_request from the driver and letting the loop in generic_make_request handle it.

Note that various drivers got the return value from ->make_request and returned non-zero values for errors.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
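For illustration, a minimal sketch of the calling convention the message describes, as seen from a hypothetical stacking driver: the driver name, the my_target structure, and the my_stack_find_target() helper are invented for this example and are not part of the patch; only generic_make_request() and the bio fields (bi_bdev, bi_sector) are the real 2011-era kernel API.

struct my_target {
	struct block_device *bdev;	/* backing device for this range */
	sector_t start;			/* sector offset on that device */
};

/* Hypothetical stacking driver's ->make_request after this change. */
static void my_stack_make_request(struct request_queue *q, struct bio *bio)
{
	/* Driver-private lookup of the backing target (assumed helper). */
	struct my_target *t = my_stack_find_target(q->queuedata, bio->bi_sector);

	/* Remap the bio onto the backing device. */
	bio->bi_bdev = t->bdev;
	bio->bi_sector += t->start;

	/*
	 * Instead of returning non-zero and relying on __generic_make_request
	 * to loop around the updated bio, resubmit it explicitly; the loop in
	 * generic_make_request handles any further stacking.
	 */
	generic_make_request(bio);
}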
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	153
1 file changed, 62 insertions(+), 91 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ab673f0b8c30..f58e019be67b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-int blk_queue_bio(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * any locks.
 	 */
 	if (attempt_plug_merge(current, q, bio))
-		goto out;
+		return;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -1312,8 +1312,6 @@ get_rq:
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
-out:
-	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
 
@@ -1441,112 +1439,85 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 static inline void __generic_make_request(struct bio *bio)
 {
 	struct request_queue *q;
-	sector_t old_sector;
-	int ret, nr_sectors = bio_sectors(bio);
-	dev_t old_dev;
+	int nr_sectors = bio_sectors(bio);
 	int err = -EIO;
+	char b[BDEVNAME_SIZE];
+	struct hd_struct *part;
 
 	might_sleep();
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	/*
-	 * Resolve the mapping until finished. (drivers are
-	 * still free to implement/resolve their own stacking
-	 * by explicitly returning 0)
-	 *
-	 * NOTE: we don't repeat the blk_size check for each new device.
-	 * Stacking drivers are expected to know what they are doing.
-	 */
-	old_sector = -1;
-	old_dev = 0;
-	do {
-		char b[BDEVNAME_SIZE];
-		struct hd_struct *part;
-
-		q = bdev_get_queue(bio->bi_bdev);
-		if (unlikely(!q)) {
-			printk(KERN_ERR
-			       "generic_make_request: Trying to access "
-				"nonexistent block-device %s (%Lu)\n",
-				bdevname(bio->bi_bdev, b),
-				(long long) bio->bi_sector);
-			goto end_io;
-		}
-
-		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-			     nr_sectors > queue_max_hw_sectors(q))) {
-			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			       bdevname(bio->bi_bdev, b),
-			       bio_sectors(bio),
-			       queue_max_hw_sectors(q));
-			goto end_io;
-		}
-
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-			goto end_io;
-
-		part = bio->bi_bdev->bd_part;
-		if (should_fail_request(part, bio->bi_size) ||
-		    should_fail_request(&part_to_disk(part)->part0,
-					bio->bi_size))
-			goto end_io;
+	q = bdev_get_queue(bio->bi_bdev);
+	if (unlikely(!q)) {
+		printk(KERN_ERR
+		       "generic_make_request: Trying to access "
+			"nonexistent block-device %s (%Lu)\n",
+			bdevname(bio->bi_bdev, b),
+			(long long) bio->bi_sector);
+		goto end_io;
+	}
 
-		/*
-		 * If this device has partitions, remap block n
-		 * of partition p to block n+start(p) of the disk.
-		 */
-		blk_partition_remap(bio);
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
+		goto end_io;
+	}
 
-		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-			goto end_io;
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		goto end_io;
 
-		if (old_sector != -1)
-			trace_block_bio_remap(q, bio, old_dev, old_sector);
+	part = bio->bi_bdev->bd_part;
+	if (should_fail_request(part, bio->bi_size) ||
+	    should_fail_request(&part_to_disk(part)->part0,
+				bio->bi_size))
+		goto end_io;
 
-		old_sector = bio->bi_sector;
-		old_dev = bio->bi_bdev->bd_dev;
+	/*
+	 * If this device has partitions, remap block n
+	 * of partition p to block n+start(p) of the disk.
+	 */
+	blk_partition_remap(bio);
 
-		if (bio_check_eod(bio, nr_sectors))
-			goto end_io;
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+		goto end_io;
 
-		/*
-		 * Filter flush bio's early so that make_request based
-		 * drivers without flush support don't have to worry
-		 * about them.
-		 */
-		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
-			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
-			if (!nr_sectors) {
-				err = 0;
-				goto end_io;
-			}
-		}
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
 
-		if ((bio->bi_rw & REQ_DISCARD) &&
-		    (!blk_queue_discard(q) ||
-		     ((bio->bi_rw & REQ_SECURE) &&
-		      !blk_queue_secdiscard(q)))) {
-			err = -EOPNOTSUPP;
+	/*
+	 * Filter flush bio's early so that make_request based
+	 * drivers without flush support don't have to worry
+	 * about them.
+	 */
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		if (!nr_sectors) {
+			err = 0;
 			goto end_io;
 		}
+	}
 
-		if (blk_throtl_bio(q, &bio))
-			goto end_io;
-
-		/*
-		 * If bio = NULL, bio has been throttled and will be submitted
-		 * later.
-		 */
-		if (!bio)
-			break;
-
-		trace_block_bio_queue(q, bio);
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    (!blk_queue_discard(q) ||
+	     ((bio->bi_rw & REQ_SECURE) &&
+	      !blk_queue_secdiscard(q)))) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
 
-		ret = q->make_request_fn(q, bio);
-	} while (ret);
+	if (blk_throtl_bio(q, &bio))
+		goto end_io;
 
+	/* if bio = NULL, bio has been throttled and will be submitted later. */
+	if (!bio)
+		return;
+	trace_block_bio_queue(q, bio);
+	q->make_request_fn(q, bio);
 	return;
 
 end_io: