about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--block/blk-core.c81
-rw-r--r--include/linux/blkdev.h3
2 files changed, 84 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 527d43e982bb..b8ffbfe85ca4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1530,6 +1530,87 @@ void submit_bio(int rw, struct bio *bio)
1530EXPORT_SYMBOL(submit_bio); 1530EXPORT_SYMBOL(submit_bio);
1531 1531
1532/** 1532/**
1533 * blk_rq_check_limits - Helper function to check a request for the queue limit
1534 * @q: the queue
1535 * @rq: the request being checked
1536 *
1537 * Description:
1538 * @rq may have been made based on weaker limitations of upper-level queues
1539 * in request stacking drivers, and it may violate the limitation of @q.
1540 * Since the block layer and the underlying device driver trust @rq
1541 * after it is inserted to @q, it should be checked against @q before
1542 * the insertion using this generic function.
1543 *
1544 * This function should also be useful for request stacking drivers
1545 * in some cases below, so export this fuction.
1546 * Request stacking drivers like request-based dm may change the queue
1547 * limits while requests are in the queue (e.g. dm's table swapping).
1548 * Such request stacking drivers should check those requests agaist
1549 * the new queue limits again when they dispatch those requests,
1550 * although such checkings are also done against the old queue limits
1551 * when submitting requests.
1552 */
1553int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1554{
1555 if (rq->nr_sectors > q->max_sectors ||
1556 rq->data_len > q->max_hw_sectors << 9) {
1557 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1558 return -EIO;
1559 }
1560
1561 /*
1562 * queue's settings related to segment counting like q->bounce_pfn
1563 * may differ from that of other stacking queues.
1564 * Recalculate it to check the request correctly on this queue's
1565 * limitation.
1566 */
1567 blk_recalc_rq_segments(rq);
1568 if (rq->nr_phys_segments > q->max_phys_segments ||
1569 rq->nr_phys_segments > q->max_hw_segments) {
1570 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1571 return -EIO;
1572 }
1573
1574 return 0;
1575}
1576EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1577
1578/**
1579 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1580 * @q: the queue to submit the request
1581 * @rq: the request being queued
1582 */
1583int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1584{
1585 unsigned long flags;
1586
1587 if (blk_rq_check_limits(q, rq))
1588 return -EIO;
1589
1590#ifdef CONFIG_FAIL_MAKE_REQUEST
1591 if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
1592 should_fail(&fail_make_request, blk_rq_bytes(rq)))
1593 return -EIO;
1594#endif
1595
1596 spin_lock_irqsave(q->queue_lock, flags);
1597
1598 /*
1599 * Submitting request must be dequeued before calling this function
1600 * because it will be linked to another request_queue
1601 */
1602 BUG_ON(blk_queued_rq(rq));
1603
1604 drive_stat_acct(rq, 1);
1605 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1606
1607 spin_unlock_irqrestore(q->queue_lock, flags);
1608
1609 return 0;
1610}
1611EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1612
1613/**
1533 * __end_that_request_first - end I/O on a request 1614 * __end_that_request_first - end I/O on a request
1534 * @req: the request being processed 1615 * @req: the request being processed
1535 * @error: %0 for success, < %0 for error 1616 * @error: %0 for success, < %0 for error
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e82a84c9f37a..964c246bc271 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -693,6 +693,9 @@ extern void __blk_put_request(struct request_queue *, struct request *);
693extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 693extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
694extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 694extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
695extern void blk_requeue_request(struct request_queue *, struct request *); 695extern void blk_requeue_request(struct request_queue *, struct request *);
696extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
697extern int blk_insert_cloned_request(struct request_queue *q,
698 struct request *rq);
696extern void blk_plug_device(struct request_queue *); 699extern void blk_plug_device(struct request_queue *);
697extern void blk_plug_device_unlocked(struct request_queue *); 700extern void blk_plug_device_unlocked(struct request_queue *);
698extern int blk_remove_plug(struct request_queue *); 701extern int blk_remove_plug(struct request_queue *);