about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-thin-metadata.c  10
-rw-r--r--  drivers/md/dm-thin.c           87
2 files changed, 64 insertions, 33 deletions
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 00cee02f8fc9..9452a489ed99 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
 	return r;
 }
 
-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
 {
 	int r;
 	dm_block_t old_count;
 
-	r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+	r = dm_sm_get_nr_blocks(sm, &old_count);
 	if (r)
 		return r;
 
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
 		return 0;
 
 	if (new_count < old_count) {
-		DMERR("cannot reduce size of data device");
+		DMERR("cannot reduce size of space map");
 		return -EINVAL;
 	}
 
-	return dm_sm_extend(pmd->data_sm, new_count - old_count);
+	return dm_sm_extend(sm, new_count - old_count);
 }
 
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,7 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
 
 	down_write(&pmd->root_lock);
 	if (!pmd->fail_io)
-		r = __resize_data_dev(pmd, new_count);
+		r = __resize_space_map(pmd->data_sm, new_count);
 	up_write(&pmd->root_lock);
 
 	return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 004ad1652b73..111c148fb1d0 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 		return r;
 
 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
-		DMWARN("%s: reached low water mark, sending event.",
+		DMWARN("%s: reached low water mark for data device: sending event.",
 		       dm_device_name(pool->pool_md));
 		spin_lock_irqsave(&pool->lock, flags);
 		pool->low_water_triggered = 1;
@@ -1909,6 +1909,20 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 	return r;
 }
 
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+	sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	char buffer[BDEVNAME_SIZE];
+
+	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+		metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+	}
+
+	return metadata_dev_size;
+}
+
 /*
  * thin-pool <metadata dev> <data dev>
  *	<data block size (sectors)>
@@ -1931,8 +1945,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	unsigned long block_size;
 	dm_block_t low_water_blocks;
 	struct dm_dev *metadata_dev;
-	sector_t metadata_dev_size;
-	char b[BDEVNAME_SIZE];
 
 	/*
 	 * FIXME Remove validation from scope of lock.
@@ -1953,10 +1965,11 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out_unlock;
 	}
 
-	metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
-	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-		       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+	/*
+	 * Run for the side-effect of possibly issuing a warning if the
+	 * device is too big.
+	 */
+	(void) get_metadata_dev_size(metadata_dev->bdev);
 
 	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
 	if (r) {
@@ -2079,18 +2092,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
-/*
- * Retrieves the number of blocks of the data device from
- * the superblock and compares it to the actual device size,
- * thus resizing the data device in case it has grown.
- *
- * This both copes with opening preallocated data devices in the ctr
- * being followed by a resume
- * -and-
- * calling the resume method individually after userspace has
- * grown the data device in reaction to a table event.
- */
-static int pool_preresume(struct dm_target *ti)
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 {
 	int r;
 	struct pool_c *pt = ti->private;
@@ -2098,12 +2100,7 @@ static int pool_preresume(struct dm_target *ti)
 	sector_t data_size = ti->len;
 	dm_block_t sb_data_size;
 
-	/*
-	 * Take control of the pool object.
-	 */
-	r = bind_control_target(pool, ti);
-	if (r)
-		return r;
+	*need_commit = false;
 
 	(void) sector_div(data_size, pool->sectors_per_block);
 
@@ -2114,7 +2111,7 @@ static int pool_preresume(struct dm_target *ti)
 	}
 
 	if (data_size < sb_data_size) {
-		DMERR("pool target too small, is %llu blocks (expected %llu)",
+		DMERR("pool target (%llu blocks) too small: expected %llu",
 		      (unsigned long long)data_size, sb_data_size);
 		return -EINVAL;
 
@@ -2122,17 +2119,51 @@ static int pool_preresume(struct dm_target *ti)
 		r = dm_pool_resize_data_dev(pool->pmd, data_size);
 		if (r) {
 			DMERR("failed to resize data device");
-			/* FIXME Stricter than necessary: Rollback transaction instead here */
 			set_pool_mode(pool, PM_READ_ONLY);
 			return r;
 		}
 
-		(void) commit_or_fallback(pool);
+		*need_commit = true;
 	}
 
 	return 0;
 }
 
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This both copes with opening preallocated data devices in the ctr
+ * being followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+	int r;
+	bool need_commit1;
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+
+	/*
+	 * Take control of the pool object.
+	 */
+	r = bind_control_target(pool, ti);
+	if (r)
+		return r;
+
+	r = maybe_resize_data_dev(ti, &need_commit1);
+	if (r)
+		return r;
+
+	if (need_commit1)
+		(void) commit_or_fallback(pool);
+
+	return 0;
+}
+
 static void pool_resume(struct dm_target *ti)
 {
 	struct pool_c *pt = ti->private;