author     Joe Thornber <ejt@redhat.com>  2012-03-28 13:41:29 -0400
committer  Alasdair G Kergon <agk@redhat.com>  2012-03-28 13:41:29 -0400
commit     67e2e2b281812b5caf4923a38aadc6b89e34f064
tree       c04255840de5e70a0aa2880d1f1c8bfe1b2e7817
parent     104655fd4dcebd50068ef30253a001da72e3a081
dm thin: add pool target flags to control discard
Add dm thin target arguments to control discard support.

ignore_discard: Disables discard support.

no_discard_passdown: Don't pass discards down to the underlying data device,
but just remove the mapping within the thin provisioning target.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
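The new feature arguments go in the optional feature list at the end of the
pool target table line, after the four positional arguments (metadata dev,
data dev, data block size, low water mark).  For example, a pool table that
keeps discard processing in the target but does not pass discards down to the
data device might look like this (device names and sizes are illustrative,
not taken from this patch):

    0 20971520 thin-pool /dev/mapper/pool-meta /dev/mapper/pool-data 128 32768 1 no_discard_passdown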
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c  135
1 file changed, 108 insertions, 27 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 703bbbc4f16f..213ae32a0fc4 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -489,6 +489,13 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * devices.
  */
 struct new_mapping;
+
+struct pool_features {
+        unsigned zero_new_blocks:1;
+        unsigned discard_enabled:1;
+        unsigned discard_passdown:1;
+};
+
 struct pool {
         struct list_head list;
         struct dm_target *ti;   /* Only set if a pool target is bound */
@@ -502,7 +509,7 @@ struct pool {
         dm_block_t offset_mask;
         dm_block_t low_water_blocks;
 
-        unsigned zero_new_blocks:1;
+        struct pool_features pf;
         unsigned low_water_triggered:1; /* A dm event has been sent */
         unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
 
@@ -543,7 +550,7 @@ struct pool_c {
         struct dm_target_callbacks callbacks;
 
         dm_block_t low_water_blocks;
-        unsigned zero_new_blocks:1;
+        struct pool_features pf;
 };
 
 /*
@@ -1051,7 +1058,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
          * zeroing pre-existing data, we can issue the bio immediately.
          * Otherwise we use kcopyd to zero the data first.
          */
-        if (!pool->zero_new_blocks)
+        if (!pool->pf.zero_new_blocks)
                 process_prepared_mapping(m);
 
         else if (io_overwrites_block(pool, bio)) {
@@ -1202,7 +1209,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                  */
                 m = get_next_mapping(pool);
                 m->tc = tc;
-                m->pass_discard = !lookup_result.shared;
+                m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown;
                 m->virt_block = block;
                 m->data_block = lookup_result.block;
                 m->cell = cell;
@@ -1617,7 +1624,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 
         pool->ti = ti;
         pool->low_water_blocks = pt->low_water_blocks;
-        pool->zero_new_blocks = pt->zero_new_blocks;
+        pool->pf = pt->pf;
 
         return 0;
 }
@@ -1631,6 +1638,14 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 /*----------------------------------------------------------------
  * Pool creation
  *--------------------------------------------------------------*/
+/* Initialize pool features. */
+static void pool_features_init(struct pool_features *pf)
+{
+        pf->zero_new_blocks = 1;
+        pf->discard_enabled = 1;
+        pf->discard_passdown = 1;
+}
+
 static void __pool_destroy(struct pool *pool)
 {
         __pool_table_remove(pool);
@@ -1678,7 +1693,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
         pool->block_shift = ffs(block_size) - 1;
         pool->offset_mask = block_size - 1;
         pool->low_water_blocks = 0;
-        pool->zero_new_blocks = 1;
+        pool_features_init(&pool->pf);
         pool->prison = prison_create(PRISON_CELLS);
         if (!pool->prison) {
                 *error = "Error creating pool's bio prison";
@@ -1775,7 +1790,8 @@ static void __pool_dec(struct pool *pool)
 
 static struct pool *__pool_find(struct mapped_device *pool_md,
                                 struct block_device *metadata_dev,
-                                unsigned long block_size, char **error)
+                                unsigned long block_size, char **error,
+                                int *created)
 {
         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
 
@@ -1791,8 +1807,10 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
                                 return ERR_PTR(-EINVAL);
                         __pool_inc(pool);
 
-                } else
+                } else {
                         pool = pool_create(pool_md, metadata_dev, block_size, error);
+                        *created = 1;
+                }
         }
 
         return pool;
@@ -1816,10 +1834,6 @@ static void pool_dtr(struct dm_target *ti)
         mutex_unlock(&dm_thin_pool_table.mutex);
 }
 
-struct pool_features {
-        unsigned zero_new_blocks:1;
-};
-
 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
                                struct dm_target *ti)
 {
@@ -1828,7 +1842,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
         const char *arg_name;
 
         static struct dm_arg _args[] = {
-                {0, 1, "Invalid number of pool feature arguments"},
+                {0, 3, "Invalid number of pool feature arguments"},
         };
 
         /*
@@ -1848,6 +1862,12 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
                 if (!strcasecmp(arg_name, "skip_block_zeroing")) {
                         pf->zero_new_blocks = 0;
                         continue;
+                } else if (!strcasecmp(arg_name, "ignore_discard")) {
+                        pf->discard_enabled = 0;
+                        continue;
+                } else if (!strcasecmp(arg_name, "no_discard_passdown")) {
+                        pf->discard_passdown = 0;
+                        continue;
                 }
 
                 ti->error = "Unrecognised pool feature requested";
@@ -1865,10 +1885,12 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
  *
  * Optional feature arguments are:
  *      skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
+ *      ignore_discard: disable discard
+ *      no_discard_passdown: don't pass discards down to the data device
  */
 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
-        int r;
+        int r, pool_created = 0;
         struct pool_c *pt;
         struct pool *pool;
         struct pool_features pf;
@@ -1928,8 +1950,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         /*
          * Set default pool features.
          */
-        memset(&pf, 0, sizeof(pf));
-        pf.zero_new_blocks = 1;
+        pool_features_init(&pf);
 
         dm_consume_args(&as, 4);
         r = parse_pool_features(&as, &pf, ti);
@@ -1943,21 +1964,58 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         }
 
         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
-                           block_size, &ti->error);
+                           block_size, &ti->error, &pool_created);
         if (IS_ERR(pool)) {
                 r = PTR_ERR(pool);
                 goto out_free_pt;
         }
 
+        /*
+         * 'pool_created' reflects whether this is the first table load.
+         * Top level discard support is not allowed to be changed after
+         * initial load.  This would require a pool reload to trigger thin
+         * device changes.
+         */
+        if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
+                ti->error = "Discard support cannot be disabled once enabled";
+                r = -EINVAL;
+                goto out_flags_changed;
+        }
+
+        /*
+         * If discard_passdown was enabled verify that the data device
+         * supports discards.  Disable discard_passdown if not; otherwise
+         * -EOPNOTSUPP will be returned.
+         */
+        if (pf.discard_passdown) {
+                struct request_queue *q = bdev_get_queue(data_dev->bdev);
+                if (!q || !blk_queue_discard(q)) {
+                        DMWARN("Discard unsupported by data device: Disabling discard passdown.");
+                        pf.discard_passdown = 0;
+                }
+        }
+
         pt->pool = pool;
         pt->ti = ti;
         pt->metadata_dev = metadata_dev;
         pt->data_dev = data_dev;
         pt->low_water_blocks = low_water_blocks;
-        pt->zero_new_blocks = pf.zero_new_blocks;
+        pt->pf = pf;
         ti->num_flush_requests = 1;
-        ti->num_discard_requests = 1;
-        ti->discards_supported = 1;
+        /*
+         * Only need to enable discards if the pool should pass
+         * them down to the data device.  The thin device's discard
+         * processing will cause mappings to be removed from the btree.
+         */
+        if (pf.discard_enabled && pf.discard_passdown) {
+                ti->num_discard_requests = 1;
+                /*
+                 * Setting 'discards_supported' circumvents the normal
+                 * stacking of discard limits (this keeps the pool and
+                 * thin devices' discard limits consistent).
+                 */
+                ti->discards_supported = 1;
+        }
         ti->private = pt;
 
         pt->callbacks.congested_fn = pool_is_congested;
@@ -1967,6 +2025,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
         return 0;
 
+out_flags_changed:
+        __pool_dec(pool);
 out_free_pt:
         kfree(pt);
 out:
@@ -2255,7 +2315,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 static int pool_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned maxlen)
 {
-        int r;
+        int r, count;
         unsigned sz = 0;
         uint64_t transaction_id;
         dm_block_t nr_free_blocks_data;
@@ -2318,10 +2378,19 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                        (unsigned long)pool->sectors_per_block,
                        (unsigned long long)pt->low_water_blocks);
 
-                DMEMIT("%u ", !pool->zero_new_blocks);
+                count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
+                        !pool->pf.discard_passdown;
+                DMEMIT("%u ", count);
 
-                if (!pool->zero_new_blocks)
+                if (!pool->pf.zero_new_blocks)
                         DMEMIT("skip_block_zeroing ");
+
+                if (!pool->pf.discard_enabled)
+                        DMEMIT("ignore_discard ");
+
+                if (!pool->pf.discard_passdown)
+                        DMEMIT("no_discard_passdown ");
+
                 break;
         }
 
@@ -2352,6 +2421,9 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 
 static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
 {
+        /*
+         * FIXME: these limits may be incompatible with the pool's data device
+         */
         limits->max_discard_sectors = pool->sectors_per_block;
 
         /*
@@ -2359,6 +2431,7 @@ static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
          * bios that overlap 2 blocks.
          */
         limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+        limits->discard_zeroes_data = pool->pf.zero_new_blocks;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2368,14 +2441,15 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
         blk_limits_io_min(limits, 0);
         blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-        set_discard_limits(pool, limits);
+        if (pool->pf.discard_enabled)
+                set_discard_limits(pool, limits);
 }
 
 static struct target_type pool_target = {
         .name = "thin-pool",
         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                     DM_TARGET_IMMUTABLE,
-        .version = {1, 0, 0},
+        .version = {1, 1, 0},
         .module = THIS_MODULE,
         .ctr = pool_ctr,
         .dtr = pool_dtr,
@@ -2417,6 +2491,9 @@ static void thin_dtr(struct dm_target *ti)
  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
  * dev_id: the internal device identifier
  * origin_dev: a device external to the pool that should act as the origin
+ *
+ * If the pool device has discards disabled, they get disabled for the thin
+ * device as well.
  */
 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2485,8 +2562,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
         ti->split_io = tc->pool->sectors_per_block;
         ti->num_flush_requests = 1;
-        ti->num_discard_requests = 1;
-        ti->discards_supported = 1;
+
+        /* In case the pool supports discards, pass them on. */
+        if (tc->pool->pf.discard_enabled) {
+                ti->discards_supported = 1;
+                ti->num_discard_requests = 1;
+        }
 
         dm_put(pool_md);
 