Diffstat (limited to 'drivers/md/dm.c')

 drivers/md/dm.c | 52 ++++++++++++++++++----------------------------------
 1 file changed, 18 insertions(+), 34 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e0632..0cf68b478878 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -477,7 +477,8 @@ static void start_io_acct(struct dm_io *io)
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
+	atomic_set(&dm_disk(md)->part0.in_flight[rw],
+		   atomic_inc_return(&md->pending[rw]));
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -497,8 +498,8 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
 	 * a flush.
 	 */
-	dm_disk(md)->part0.in_flight[rw] = pending =
-		atomic_dec_return(&md->pending[rw]);
+	pending = atomic_dec_return(&md->pending[rw]);
+	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
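The two hunks above switch the in-flight bookkeeping to atomic accessors: md->pending[] holds the authoritative count and the gendisk's part0.in_flight[] only mirrors it. For context, a condensed sketch of the helper dm.c uses to decide whether any I/O remains in flight (assumed from the surrounding file, not part of this diff):

	static int md_in_flight(struct mapped_device *md)
	{
		/* both directions must drain before a suspend may proceed */
		return atomic_read(&md->pending[READ]) +
		       atomic_read(&md->pending[WRITE]);
	}

This is also why end_io_acct() adds atomic_read(&md->pending[rw^0x1]) above: the suspend wait queue should only be nudged once reads and writes have both reached zero.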
@@ -807,8 +808,6 @@ void dm_requeue_unmapped_request(struct request *clone)
 	dm_unprep_request(rq);
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (elv_queue_empty(q))
-		blk_plug_device(q);
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
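With the plug calls gone, dm_requeue_unmapped_request() just puts the original request back on the queue; it is picked up the next time the queue is run (by new submissions, or by the blk_delay_queue() retry added to dm_request_fn() below) rather than on an unplug. A minimal, hypothetical helper showing the bare requeue pattern that remains:

	static void requeue_request(struct request_queue *q, struct request *rq)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_requeue_request(q, rq);	/* back onto the dispatch queue */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}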
@@ -1613,10 +1612,10 @@ static void dm_request_fn(struct request_queue *q)
 	 * number of in-flight I/Os after the queue is stopped in
 	 * dm_suspend().
 	 */
-	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto plug_and_out;
+			goto delay_and_out;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
@@ -1627,7 +1626,7 @@ static void dm_request_fn(struct request_queue *q)
 		BUG_ON(!dm_target_is_valid(ti));
 
 		if (ti->type->busy && ti->type->busy(ti))
-			goto plug_and_out;
+			goto delay_and_out;
 
 		blk_start_request(rq);
 		clone = rq->special;
@@ -1647,11 +1646,8 @@ requeued:
 	BUG_ON(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
-plug_and_out:
-	if (!elv_queue_empty(q))
-		/* Some requests still remain, retry later */
-		blk_plug_device(q);
-
+delay_and_out:
+	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
 
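The renamed delay_and_out label captures the new retry model: instead of plugging the queue and waiting for an unplug to re-drive it, blk_delay_queue() schedules the queue to be run again from kblockd after a short delay, and the retry is now unconditional at the label (the old elv_queue_empty() test deciding whether to plug is gone). A condensed, hypothetical view of the dispatch loop under the new scheme:

	static void example_request_fn(struct request_queue *q)	/* illustrative */
	{
		struct request *rq;

		while (!blk_queue_stopped(q)) {
			rq = blk_peek_request(q);
			if (!rq)
				goto delay_and_out;	/* nothing ready: retry later */
			blk_start_request(rq);
			/* ... hand rq off to the target ... */
		}
		return;

	delay_and_out:
		blk_delay_queue(q, HZ / 10);	/* kblockd re-runs the queue later */
	}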
@@ -1680,20 +1676,6 @@ static int dm_lld_busy(struct request_queue *q)
 	return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
-
-	if (map) {
-		if (dm_request_based(md))
-			generic_unplug_device(q);
-
-		dm_table_unplug_all(map);
-		dm_table_put(map);
-	}
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
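dm_unplug_all() existed purely to back the queue's unplug_fn hook, unplugging the dm queue itself and then every underlying device via dm_table_unplug_all(); with per-queue plugging removed from the block layer there is nothing left for it to do. The replacement introduced by the same series (block-layer context, not part of this file) is on-stack plugging, where the submitter batches its own bios:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* subsequent bios queue on the task plug */
	submit_bio(READ, bio);		/* 'bio' assumed prepared by the caller */
	blk_finish_plug(&plug);		/* flush the batch down to the drivers */

Since the plug now lives on the submitting task's stack, there is no queue-wide plug state for dm to propagate to its targets, which is also why the dm_unplug_all() and dm_table_unplug_all() call sites in the later hunks simply disappear.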
@@ -1817,7 +1799,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
@@ -2263,8 +2244,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	dm_unplug_all(md->queue);
-
 	add_wait_queue(&md->wait, &wait);
 
 	while (1) {
@@ -2539,7 +2518,6 @@ int dm_resume(struct mapped_device *md)
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
-	dm_table_unplug_all(map);
 	r = 0;
 out:
 	dm_table_put(map);
@@ -2643,9 +2621,10 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 {
 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
 
 	if (!pools)
 		return NULL;
@@ -2662,13 +2641,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
 	if (!pools->tio_pool)
 		goto free_io_pool_and_out;
 
-	pools->bs = (type == DM_TYPE_BIO_BASED) ?
-		bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+	pools->bs = bioset_create(pool_size, 0);
 	if (!pools->bs)
 		goto free_tio_pool_and_out;
 
+	if (integrity && bioset_integrity_create(pools->bs, pool_size))
+		goto free_bioset_and_out;
+
 	return pools;
 
+free_bioset_and_out:
+	bioset_free(pools->bs);
+
 free_tio_pool_and_out:
 	mempool_destroy(pools->tio_pool);
 
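The last two hunks wire up block integrity support in the mempool constructor: dm_alloc_md_mempools() gains an integrity flag, the bioset size is hoisted into pool_size, and when the flag is set bioset_integrity_create() attaches an integrity payload pool of the same size. The new free_bioset_and_out label frees the bioset on failure and then falls through to the existing tio_pool cleanup. A hypothetical caller under the new signature (DM_TYPE_REQUEST_BASED assumed from dm.h):

	struct dm_md_mempools *pools;

	pools = dm_alloc_md_mempools(DM_TYPE_REQUEST_BASED, 1 /* integrity */);
	if (!pools)
		return -ENOMEM;	/* covers bioset_integrity_create() failure too */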