Diffstat (limited to 'block/blk-core.c')

 block/blk-core.c | 64 ++++++++++++++++++++++++++++++----------------------------
 1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 71da5111120c..9fe174dc74d1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
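
Taken together with the blk_dequeue_request() hunk further down, which drops the in-flight heuristic that set QUEUE_FLAG_CQ, this change keys plugging off a flag drivers already declare: a queue is left unplugged only when it is both non-rotational and uses tagged command queuing. Both helpers are bit tests on q->queue_flags. A runnable user-space sketch of the predicate, with invented bit positions standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_FLAG_NONROT (1u << 0)	/* illustrative bit positions, */
#define QUEUE_FLAG_QUEUED (1u << 1)	/* not the kernel's real values */

/*
 * Plug unless the device is non-rotational (an SSD) and also does
 * tagged command queuing.
 */
static bool queue_should_plug(unsigned int queue_flags)
{
	return !((queue_flags & QUEUE_FLAG_NONROT) &&
		 (queue_flags & QUEUE_FLAG_QUEUED));
}

int main(void)
{
	/* SSD with TCQ: no plugging. Everything else: plug. */
	printf("%d\n", queue_should_plug(QUEUE_FLAG_NONROT | QUEUE_FLAG_QUEUED)); /* 0 */
	printf("%d\n", queue_should_plug(QUEUE_FLAG_NONROT));                     /* 1 */
	return 0;
}

The rationale: a device that keeps many commands in flight on its own gains little from batching requests behind a plug, so plugging only adds latency there.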
@@ -1490,9 +1490,9 @@ end_io:
 /*
  * We only want one ->make_request_fn to be active at a time,
  * else stack usage with stacked devices could be a problem.
- * So use current->bio_{list,tail} to keep a list of requests
+ * So use current->bio_list to keep a list of requests
  * submited by a make_request_fn function.
- * current->bio_tail is also used as a flag to say if
+ * current->bio_list is also used as a flag to say if
  * generic_make_request is currently active in this task or not.
  * If it is NULL, then no make_request is active. If it is non-NULL,
  * then a make_request is active, and new requests should be added
@@ -1500,11 +1500,11 @@ end_io:
  */
 void generic_make_request(struct bio *bio)
 {
-	if (current->bio_tail) {
+	struct bio_list bio_list_on_stack;
+
+	if (current->bio_list) {
 		/* make_request is active */
-		*(current->bio_tail) = bio;
-		bio->bi_next = NULL;
-		current->bio_tail = &bio->bi_next;
+		bio_list_add(current->bio_list, bio);
 		return;
 	}
 	/* following loop may be a bit non-obvious, and so deserves some
@@ -1512,30 +1512,27 @@ void generic_make_request(struct bio *bio)
 	 * Before entering the loop, bio->bi_next is NULL (as all callers
 	 * ensure that) so we have a list with a single bio.
 	 * We pretend that we have just taken it off a longer list, so
-	 * we assign bio_list to the next (which is NULL) and bio_tail
-	 * to &bio_list, thus initialising the bio_list of new bios to be
+	 * we assign bio_list to a pointer to the bio_list_on_stack,
+	 * thus initialising the bio_list of new bios to be
 	 * added. __generic_make_request may indeed add some more bios
 	 * through a recursive call to generic_make_request. If it
 	 * did, we find a non-NULL value in bio_list and re-enter the loop
 	 * from the top. In this case we really did just take the bio
-	 * of the top of the list (no pretending) and so fixup bio_list and
-	 * bio_tail or bi_next, and call into __generic_make_request again.
+	 * of the top of the list (no pretending) and so remove it from
+	 * bio_list, and call into __generic_make_request again.
 	 *
 	 * The loop was structured like this to make only one call to
 	 * __generic_make_request (which is important as it is large and
 	 * inlined) and to keep the structure simple.
 	 */
 	BUG_ON(bio->bi_next);
+	bio_list_init(&bio_list_on_stack);
+	current->bio_list = &bio_list_on_stack;
 	do {
-		current->bio_list = bio->bi_next;
-		if (bio->bi_next == NULL)
-			current->bio_tail = &current->bio_list;
-		else
-			bio->bi_next = NULL;
 		__generic_make_request(bio);
-		bio = current->bio_list;
+		bio = bio_list_pop(current->bio_list);
 	} while (bio);
-	current->bio_tail = NULL; /* deactivate */
+	current->bio_list = NULL; /* deactivate */
 }
 EXPORT_SYMBOL(generic_make_request);
 
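The two hunks above are one conversion: the open-coded current->bio_{list,tail} head/tail pointer pair becomes a struct bio_list FIFO kept on the outermost caller's stack. As the in-code comment explains, a nested generic_make_request() from a stacked device no longer recurses; it appends to the active list and returns, and the outermost invocation pops and services bios until the list drains, bounding stack depth regardless of how deeply devices are stacked. A runnable user-space model of the pattern; the bio_list helpers mirror the kernel's list semantics, while submit() and handle() are invented stand-ins for generic_make_request() and __generic_make_request():

#include <stdio.h>
#include <stdlib.h>

struct bio { int id; struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };

static struct bio_list *active;		/* models current->bio_list */

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;
	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bio->bi_next;
		if (!bl->head)
			bl->tail = NULL;
		bio->bi_next = NULL;
	}
	return bio;
}

static void submit(struct bio *bio);

/*
 * Stands in for __generic_make_request(); resubmitting models a
 * stacked device remapping a bio to a lower device.
 */
static void handle(struct bio *bio)
{
	printf("servicing bio %d\n", bio->id);
	if (bio->id < 3) {
		struct bio *next = calloc(1, sizeof(*next));
		next->id = bio->id + 1;
		submit(next);		/* queued, not recursed into */
	}
}

static void submit(struct bio *bio)
{
	struct bio_list on_stack = { NULL, NULL };

	if (active) {			/* nested call: just queue it */
		bio_list_add(active, bio);
		return;
	}
	active = &on_stack;		/* outermost call: drain loop */
	do {
		handle(bio);
		bio = bio_list_pop(active);
	} while (bio);
	active = NULL;			/* deactivate */
}

int main(void)
{
	struct bio first = { 1, NULL };

	submit(&first);			/* prints bios 1, 2, 3 in order */
	return 0;
}

Behavior is unchanged by the conversion; bios are still serviced in submission order. The FIFO simply replaces the fiddly head/tail pointer bookkeeping with a reusable structure.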
@@ -1617,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
+	if (rq->nr_phys_segments > queue_max_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
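
This hunk reflects the consolidation of the separate physical and hardware segment limits into a single value, so blk_rq_check_limits() compares against one ceiling. Assuming the consolidated queue_limits layout of this kernel generation, the accessor is roughly the following sketch, not a verbatim copy of blkdev.h:

/* Sketch: the two old per-type limits become one max_segments field. */
static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}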
@@ -1859,15 +1855,8 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq)) {
+	if (blk_account_rq(rq))
 		q->in_flight[rq_is_sync(rq)]++;
-		/*
-		 * Mark this device as supporting hardware queuing, if
-		 * we have more IOs in flight than 4.
-		 */
-		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-	}
 }
 
 /**
@@ -2358,6 +2347,25 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+/**
+ * rq_flush_dcache_pages - Helper function to flush all pages in a request
+ * @rq: the request to be flushed
+ *
+ * Description:
+ *     Flush all pages in @rq.
+ */
+void rq_flush_dcache_pages(struct request *rq)
+{
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+
+	rq_for_each_segment(bvec, rq, iter)
+		flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
+#endif
+
 /**
  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
  * @q : the queue of the device being checked
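
The new rq_flush_dcache_pages() walks every bio_vec of every bio in the request and calls flush_dcache_page() on each page, which matters on architectures with aliasing D-caches where a device has written memory behind the CPU's back. A hypothetical driver fragment showing the intended call site on a completed DMA read; mydev_complete_read() is invented, while blk_end_request_all() is this era's request-completion helper:

#include <linux/blkdev.h>

/*
 * Hypothetical completion path for a DMA read: the device filled the
 * request's pages directly, so flush D-cache aliases before anything
 * maps and reads them, then complete the whole request.
 */
static void mydev_complete_read(struct request *rq, int error)
{
	rq_flush_dcache_pages(rq);
	blk_end_request_all(rq, error);
}

On architectures that do not set ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, the pairing header should supply a no-op stub, so the call can be made unconditionally from drivers.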