Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	162
1 file changed, 136 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be003e5fea3d..5a843c1f4d64 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -190,6 +190,15 @@ struct mapped_device {
 	struct bio barrier_bio;
 };
 
+/*
+ * For mempools pre-allocation at the table loading time.
+ */
+struct dm_md_mempools {
+	mempool_t *io_pool;
+	mempool_t *tio_pool;
+	struct bio_set *bs;
+};
+
 #define MIN_IOS 256
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
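[Editor's note: why mempools rather than plain slab allocations? A mempool keeps a pre-allocated reserve of objects so allocations on the I/O path can always make forward progress under memory pressure. A minimal sketch of the pattern using the stock mempool API; the cache name and object size below are hypothetical stand-ins:

	struct kmem_cache *cache;
	mempool_t *pool;
	void *obj;

	cache = kmem_cache_create("example_cache", 256, 0, 0, NULL);
	pool = mempool_create_slab_pool(MIN_IOS, cache);  /* reserve MIN_IOS objects */

	/*
	 * On the I/O path: with a __GFP_WAIT mask such as GFP_NOIO this
	 * sleeps for a freed element instead of ever returning NULL.
	 */
	obj = mempool_alloc(pool, GFP_NOIO);
	mempool_free(obj, pool);

	mempool_destroy(pool);
	kmem_cache_destroy(cache);

This is exactly the guarantee the per-device io/tio pools and bioset rely on; the patch's point is to defer creating them until the table type is known.]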
@@ -1739,10 +1748,22 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->uevent_list);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_alloc_queue(GFP_KERNEL);
+	md->queue = blk_init_queue(dm_request_fn, NULL);
 	if (!md->queue)
 		goto bad_queue;
 
+	/*
+	 * Request-based dm devices cannot be stacked on top of bio-based dm
+	 * devices.  The type of this dm device has not been decided yet,
+	 * although we initialized the queue using blk_init_queue().
+	 * The type is decided at the first table loading time.
+	 * To prevent problematic device stacking, clear the queue flag
+	 * for request stacking support until then.
+	 *
+	 * This queue is new, so no concurrency on the queue_flags.
+	 */
+	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+	md->saved_make_request_fn = md->queue->make_request_fn;
 	md->queue->queuedata = md;
 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
 	md->queue->backing_dev_info.congested_data = md;
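[Editor's note: the comment above carries the main subtlety of this hunk: every mapped device now gets a full request_fn queue up front, but it must not advertise request-stacking support until its type is fixed at first table load. Consumers of that flag test it via the blk_queue_stackable() helper of this kernel era; a hedged sketch of such a check, where bdev is a hypothetical underlying device:

	struct request_queue *q = bdev_get_queue(bdev);

	/*
	 * Refuse to stack requests on a queue that has not (or not yet)
	 * declared itself stackable; fall back to bio-based mapping.
	 */
	if (!q || !blk_queue_stackable(q))
		return 0;
]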
@@ -1751,18 +1772,9 @@ static struct mapped_device *alloc_dev(int minor)
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-
-	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
-	if (!md->io_pool)
-		goto bad_io_pool;
-
-	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
-	if (!md->tio_pool)
-		goto bad_tio_pool;
-
-	md->bs = bioset_create(16, 0);
-	if (!md->bs)
-		goto bad_no_bioset;
+	blk_queue_softirq_done(md->queue, dm_softirq_done);
+	blk_queue_prep_rq(md->queue, dm_prep_fn);
+	blk_queue_lld_busy(md->queue, dm_lld_busy);
 
 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -1804,12 +1816,6 @@ bad_bdev:
 bad_thread:
 	put_disk(md->disk);
 bad_disk:
-	bioset_free(md->bs);
-bad_no_bioset:
-	mempool_destroy(md->tio_pool);
-bad_tio_pool:
-	mempool_destroy(md->io_pool);
-bad_io_pool:
 	blk_cleanup_queue(md->queue);
 bad_queue:
 	free_minor(minor);
@@ -1829,9 +1835,12 @@ static void free_dev(struct mapped_device *md)
 	unlock_fs(md);
 	bdput(md->bdev);
 	destroy_workqueue(md->wq);
-	mempool_destroy(md->tio_pool);
-	mempool_destroy(md->io_pool);
-	bioset_free(md->bs);
+	if (md->tio_pool)
+		mempool_destroy(md->tio_pool);
+	if (md->io_pool)
+		mempool_destroy(md->io_pool);
+	if (md->bs)
+		bioset_free(md->bs);
 	blk_integrity_unregister(md->disk);
 	del_gendisk(md->disk);
 	free_minor(minor);
@@ -1846,6 +1855,29 @@ static void free_dev(struct mapped_device *md)
 	kfree(md);
 }
 
+static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+{
+	struct dm_md_mempools *p;
+
+	if (md->io_pool && md->tio_pool && md->bs)
+		/* the md already has necessary mempools */
+		goto out;
+
+	p = dm_table_get_md_mempools(t);
+	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+
+	md->io_pool = p->io_pool;
+	p->io_pool = NULL;
+	md->tio_pool = p->tio_pool;
+	p->tio_pool = NULL;
+	md->bs = p->bs;
+	p->bs = NULL;
+
+out:
+	/* mempool bind completed, now no need any mempools in the table */
+	dm_table_free_md_mempools(t);
+}
+
 /*
  * Bind a table to the device.
  */
@@ -1897,6 +1929,18 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 
 	dm_table_event_callback(t, event_callback, md);
 
+	/*
+	 * The queue hasn't been stopped yet, if the old table type wasn't
+	 * for request-based during suspension. So stop it to prevent
+	 * I/O mapping before resume.
+	 * This must be done before setting the queue restrictions,
+	 * because request-based dm may be run just after the setting.
+	 */
+	if (dm_table_request_based(t) && !blk_queue_stopped(q))
+		stop_queue(q);
+
+	__bind_mempools(md, t);
+
 	write_lock(&md->map_lock);
 	md->map = t;
 	dm_table_set_restrictions(t, q, limits);
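[Editor's note: stop_queue() is not defined in this file; for a request_fn queue it is presumably a locked wrapper around blk_stop_queue(), which must be called with the queue lock held, along these lines:

	static void stop_queue(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);	/* requires the queue lock */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
]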
@@ -2110,10 +2154,14 @@ static void dm_wq_work(struct work_struct *work)
 
 		up_write(&md->io_lock);
 
-		if (bio_barrier(c))
-			process_barrier(md, c);
-		else
-			__split_and_process_bio(md, c);
+		if (dm_request_based(md))
+			generic_make_request(c);
+		else {
+			if (bio_barrier(c))
+				process_barrier(md, c);
+			else
+				__split_and_process_bio(md, c);
+		}
 
 		down_write(&md->io_lock);
 	}
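[Editor's note: deferred bios on a request-based device are simply resubmitted through generic_make_request(), letting the block layer queue them as requests again, while bio-based devices keep the old barrier/split handling. dm_request_based() itself is a one-liner defined elsewhere in this series; a plausible form, given that QUEUE_FLAG_STACKABLE doubles as the type marker set up in alloc_dev():

	int dm_request_based(struct mapped_device *md)
	{
		return blk_queue_stackable(md->queue);
	}
]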
@@ -2146,6 +2194,13 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	if (r)
 		goto out;
 
+	/* cannot change the device type, once a table is bound */
+	if (md->map &&
+	    (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+		DMWARN("can't change the device type after a table is bound");
+		goto out;
+	}
+
 	__unbind(md);
 	r = __bind(md, table, &limits);
 
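[Editor's note: dm_table_get_type() compares the table-type constants this series introduces in drivers/md/dm.h; presumably a trio along the lines of:

	#define DM_TYPE_NONE		0
	#define DM_TYPE_BIO_BASED	1
	#define DM_TYPE_REQUEST_BASED	2

Once a table of one type is bound, swapping in a table of the other type is rejected, since the queue has already been committed to one I/O model.]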
@@ -2542,6 +2597,61 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+{
+	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+
+	if (!pools)
+		return NULL;
+
+	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
+			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
+			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+	if (!pools->io_pool)
+		goto free_pools_and_out;
+
+	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
+			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
+			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+	if (!pools->tio_pool)
+		goto free_io_pool_and_out;
+
+	pools->bs = (type == DM_TYPE_BIO_BASED) ?
+		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+	if (!pools->bs)
+		goto free_tio_pool_and_out;
+
+	return pools;
+
+free_tio_pool_and_out:
+	mempool_destroy(pools->tio_pool);
+
+free_io_pool_and_out:
+	mempool_destroy(pools->io_pool);
+
+free_pools_and_out:
+	kfree(pools);
+
+	return NULL;
+}
+
+void dm_free_md_mempools(struct dm_md_mempools *pools)
+{
+	if (!pools)
+		return;
+
+	if (pools->io_pool)
+		mempool_destroy(pools->io_pool);
+
+	if (pools->tio_pool)
+		mempool_destroy(pools->tio_pool);
+
+	if (pools->bs)
+		bioset_free(pools->bs);
+
+	kfree(pools);
+}
+
 static struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
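[Editor's note: two details of the exported helpers above are worth flagging. The bioset is sized per type (16 entries for bio-based, MIN_IOS for request-based clone bios), and dm_free_md_mempools() tolerates partially stripped pools, which is exactly the state __bind_mempools() leaves a table in. A hedged usage sketch from a hypothetical load path:

	struct dm_md_mempools *pools;

	pools = dm_alloc_md_mempools(DM_TYPE_REQUEST_BASED);
	if (!pools)
		return -ENOMEM;

	/*
	 * Hand pools to the table; at bind time __bind_mempools() adopts
	 * (and NULLs) whatever the mapped_device still needs.
	 */

	dm_free_md_mempools(pools);	/* frees only what was not adopted */
]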