Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--	drivers/md/dm-thin.c	135
1 file changed, 88 insertions(+), 47 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index af1fc3b2c2ad..c29410af1e22 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,9 +509,9 @@ enum pool_mode {
 struct pool_features {
 	enum pool_mode mode;
 
-	unsigned zero_new_blocks:1;
-	unsigned discard_enabled:1;
-	unsigned discard_passdown:1;
+	bool zero_new_blocks:1;
+	bool discard_enabled:1;
+	bool discard_passdown:1;
 };
 
 struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
 	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
-	struct pool_features pf;
+	struct pool_features requested_pf; /* Features requested during table load */
+	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
 };
 
 /*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
 /*----------------------------------------------------------------
  * Binding of control targets to a pool object
  *--------------------------------------------------------------*/
+static bool data_dev_supports_discard(struct pool_c *pt)
+{
+	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the data device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct pool_c *pt)
+{
+	struct pool *pool = pt->pool;
+	struct block_device *data_bdev = pt->data_dev->bdev;
+	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+	sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
+	const char *reason = NULL;
+	char buf[BDEVNAME_SIZE];
+
+	if (!pt->adjusted_pf.discard_passdown)
+		return;
+
+	if (!data_dev_supports_discard(pt))
+		reason = "discard unsupported";
+
+	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
+		reason = "max discard sectors smaller than a block";
+
+	else if (data_limits->discard_granularity > block_size)
+		reason = "discard granularity larger than a block";
+
+	else if (block_size & (data_limits->discard_granularity - 1))
+		reason = "discard granularity not a factor of block size";
+
+	if (reason) {
+		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+		pt->adjusted_pf.discard_passdown = false;
+	}
+}
+
 static int bind_control_target(struct pool *pool, struct dm_target *ti)
 {
 	struct pool_c *pt = ti->private;
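The new disable_passdown_if_not_supported() rejects passdown for four distinct reasons. The last check relies on the common block-layer convention that discard_granularity is a power of two, since block_size & (granularity - 1) only equals block_size % granularity in that case. A minimal userspace sketch of the same decision logic, with hypothetical names and sample values:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the queue-limit fields used above. */
struct limits_sketch {
	unsigned long long max_discard_sectors;	/* sectors */
	unsigned long long discard_granularity;	/* bytes */
};

static const char *passdown_reject_reason(unsigned long long sectors_per_block,
					  const struct limits_sketch *dl)
{
	unsigned long long block_size = sectors_per_block << 9; /* SECTOR_SHIFT */

	if (dl->max_discard_sectors < sectors_per_block)
		return "max discard sectors smaller than a block";
	if (dl->discard_granularity > block_size)
		return "discard granularity larger than a block";
	if (block_size & (dl->discard_granularity - 1))
		return "discard granularity not a factor of block size";
	return NULL;	/* passdown is safe */
}

int main(void)
{
	struct limits_sketch dl = { .max_discard_sectors = 2048,
				    .discard_granularity = 4096 };
	/* 128 sectors per block = 64KiB pool blocks */
	const char *reason = passdown_reject_reason(128, &dl);

	printf("%s\n", reason ? reason : "passdown ok");
	return 0;
}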
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 	 * We want to make sure that degraded pools are never upgraded.
 	 */
 	enum pool_mode old_mode = pool->pf.mode;
-	enum pool_mode new_mode = pt->pf.mode;
+	enum pool_mode new_mode = pt->adjusted_pf.mode;
 
 	if (old_mode > new_mode)
 		new_mode = old_mode;
 
 	pool->ti = ti;
 	pool->low_water_blocks = pt->low_water_blocks;
-	pool->pf = pt->pf;
-	set_pool_mode(pool, new_mode);
+	pool->pf = pt->adjusted_pf;
 
-	/*
-	 * If discard_passdown was enabled verify that the data device
-	 * supports discards.  Disable discard_passdown if not; otherwise
-	 * -EOPNOTSUPP will be returned.
-	 */
-	/* FIXME: pull this out into a sep fn. */
-	if (pt->pf.discard_passdown) {
-		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-		if (!q || !blk_queue_discard(q)) {
-			char buf[BDEVNAME_SIZE];
-			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
-			       bdevname(pt->data_dev->bdev, buf));
-			pool->pf.discard_passdown = 0;
-		}
-	}
+	set_pool_mode(pool, new_mode);
 
 	return 0;
 }
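bind_control_target() keeps pool degradation monotonic: enum pool_mode orders PM_WRITE before PM_READ_ONLY before PM_FAIL in this driver, so taking the worse of the old and requested modes means a table reload can never silently upgrade a degraded pool. A standalone sketch of that invariant, with illustrative names:

#include <assert.h>

/* Illustrative stand-in for dm-thin's pool_mode ordering. */
enum mode_sketch { SKETCH_WRITE, SKETCH_READ_ONLY, SKETCH_FAIL };

/* Never upgrade: the bound mode is the worse of old and requested. */
static enum mode_sketch rebind_mode(enum mode_sketch old, enum mode_sketch new)
{
	return old > new ? old : new;
}

int main(void)
{
	/* A failed pool stays failed even if the table asks for write mode. */
	assert(rebind_mode(SKETCH_FAIL, SKETCH_WRITE) == SKETCH_FAIL);
	/* A healthy pool can still be demoted to read-only. */
	assert(rebind_mode(SKETCH_WRITE, SKETCH_READ_ONLY) == SKETCH_READ_ONLY);
	return 0;
}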
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 static void pool_features_init(struct pool_features *pf)
 {
 	pf->mode = PM_WRITE;
-	pf->zero_new_blocks = 1;
-	pf->discard_enabled = 1;
-	pf->discard_passdown = 1;
+	pf->zero_new_blocks = true;
+	pf->discard_enabled = true;
+	pf->discard_passdown = true;
 }
 
 static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 		argc--;
 
 		if (!strcasecmp(arg_name, "skip_block_zeroing"))
-			pf->zero_new_blocks = 0;
+			pf->zero_new_blocks = false;
 
 		else if (!strcasecmp(arg_name, "ignore_discard"))
-			pf->discard_enabled = 0;
+			pf->discard_enabled = false;
 
 		else if (!strcasecmp(arg_name, "no_discard_passdown"))
-			pf->discard_passdown = 0;
+			pf->discard_passdown = false;
 
 		else if (!strcasecmp(arg_name, "read_only"))
 			pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	pt->metadata_dev = metadata_dev;
 	pt->data_dev = data_dev;
 	pt->low_water_blocks = low_water_blocks;
-	pt->pf = pf;
+	pt->adjusted_pf = pt->requested_pf = pf;
 	ti->num_flush_requests = 1;
+
 	/*
 	 * Only need to enable discards if the pool should pass
 	 * them down to the data device.  The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	 */
 	if (pf.discard_enabled && pf.discard_passdown) {
 		ti->num_discard_requests = 1;
+
 		/*
 		 * Setting 'discards_supported' circumvents the normal
 		 * stacking of discard limits (this keeps the pool and
 		 * thin devices' discard limits consistent).
 		 */
 		ti->discards_supported = true;
+		ti->discard_zeroes_data_unsupported = true;
 	}
 	ti->private = pt;
 
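Setting ti->discards_supported sidesteps the block layer's usual limit stacking, which would otherwise combine the pool's discard limits with those of every underlying device; the new ti->discard_zeroes_data_unsupported flag matches the removal of the discard_zeroes_data assignment from set_discard_limits() below. Roughly, stacking keeps the weakest value of each field, as in this hypothetical miniature (just the idea, not the kernel's blk_stack_limits()):

/* Hypothetical miniature of discard-limit stacking. */
struct dlim_sketch {
	unsigned int max_discard_sectors;
	unsigned int discard_zeroes_data:1;
};

static struct dlim_sketch stack_sketch(struct dlim_sketch t, struct dlim_sketch b)
{
	struct dlim_sketch out;

	/* The combined device can discard no more than its weakest member. */
	out.max_discard_sectors = t.max_discard_sectors < b.max_discard_sectors ?
				  t.max_discard_sectors : b.max_discard_sectors;
	/* Zeroing is only guaranteed if every member guarantees it. */
	out.discard_zeroes_data = t.discard_zeroes_data && b.discard_zeroes_data;
	return out;
}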
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
 		       (unsigned long)pool->sectors_per_block,
 		       (unsigned long long)pt->low_water_blocks);
-		emit_flags(&pt->pf, result, sz, maxlen);
+		emit_flags(&pt->requested_pf, result, sz, maxlen);
 		break;
 	}
 
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
-static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
-	/*
-	 * FIXME: these limits may be incompatible with the pool's data device
-	 */
+	struct pool *pool = pt->pool;
+	struct queue_limits *data_limits;
+
 	limits->max_discard_sectors = pool->sectors_per_block;
 
 	/*
-	 * This is just a hint, and not enforced.  We have to cope with
-	 * bios that cover a block partially.  A discard that spans a block
-	 * boundary is not sent to this target.
+	 * discard_granularity is just a hint, and not enforced.
 	 */
-	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-	limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+	if (pt->adjusted_pf.discard_passdown) {
+		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+		limits->discard_granularity = data_limits->discard_granularity;
+	} else
+		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
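With passdown enabled, set_discard_limits() now advertises the data device's own discard_granularity rather than the pool's block size, so discards forwarded downstream stay aligned to what the device accepts; without passdown the pool itself is the terminal device and one pool block is the natural unit. The choice reduces to a single expression, sketched here with hypothetical names:

/* Hypothetical condensation of the granularity choice above. */
static unsigned long granularity_sketch(int passdown,
					unsigned long data_granularity,
					unsigned long sectors_per_block)
{
	return passdown ? data_granularity
			: sectors_per_block << 9;	/* SECTOR_SHIFT */
}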
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	blk_limits_io_min(limits, 0);
 	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-	if (pool->pf.discard_enabled)
-		set_discard_limits(pool, limits);
+
+	/*
+	 * pt->adjusted_pf is a staging area for the actual features to use.
+	 * They get transferred to the live pool in bind_control_target()
+	 * called from pool_preresume().
+	 */
+	if (!pt->adjusted_pf.discard_enabled)
+		return;
+
+	disable_passdown_if_not_supported(pt);
+
+	set_discard_limits(pt, limits);
 }
 
 static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
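pool_io_hints() is where the staging happens: requested_pf keeps exactly what the table line asked for (so pool_status() can report it verbatim, per the emit_flags() change above), while adjusted_pf is trimmed to what the data device supports and later copied into the live pool by bind_control_target(). A hypothetical data-flow sketch of the two copies:

/* Hypothetical sketch of the requested/adjusted/live feature flow. */
struct features_sketch { int discard_passdown; };

struct pool_ctx_sketch {
	struct features_sketch requested;	/* as loaded, never adjusted */
	struct features_sketch adjusted;	/* staged for the next bind */
	struct features_sketch live;		/* the pool->pf equivalent */
};

static void io_hints_sketch(struct pool_ctx_sketch *pc, int dev_supports_discard)
{
	/* Quietly downgrade the staged features, not the requested ones. */
	if (pc->adjusted.discard_passdown && !dev_supports_discard)
		pc->adjusted.discard_passdown = 0;
}

static void bind_sketch(struct pool_ctx_sketch *pc)
{
	pc->live = pc->adjusted;	/* requested stays untouched */
}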
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
 	return 0;
 }
 
+/*
+ * A thin device always inherits its queue limits from its pool.
+ */
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct thin_c *tc = ti->private;
-	struct pool *pool = tc->pool;
 
-	blk_limits_io_min(limits, 0);
-	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-	set_discard_limits(pool, limits);
+	*limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
 }
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
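thin_io_hints() no longer recomputes io hints and discard limits from the pool object; it copies the pool device's entire queue_limits, so every adjustment made in pool_io_hints(), including a disabled passdown, is inherited automatically. The pattern is a plain struct assignment, sketched with hypothetical types:

/* Hypothetical miniature of whole-struct limit inheritance. */
struct qlimits_sketch {
	unsigned int io_opt;
	unsigned long discard_granularity;
};

static void thin_hints_sketch(struct qlimits_sketch *thin,
			      const struct qlimits_sketch *pool)
{
	*thin = *pool;	/* one source of truth: the pool's limits */
}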