author    Linus Torvalds <torvalds@linux-foundation.org>    2018-07-20 17:24:17 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-07-20 17:24:17 -0400
commit    b4460a9586c381edcec6a702ec8cfc80995063fc (patch)
tree      668350a1554d6c778055f85a04953e5eabcecfd0
parent    18cadf9f370735512eb8254beab77e26297a0e33 (diff)
parent    d284f8248c72d0cb36a930920e60592eb455cd0d (diff)
Merge tag 'for-4.18/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fix from Mike Snitzer:

 "Fix DM writecache target to allow an optional offset to the start of
  the data and metadata area. This allows userspace tools (e.g. LVM2)
  to place a header and metadata at the front of the writecache device
  for its use"

* tag 'for-4.18/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm writecache: support optional offset for start of device
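As a usage sketch (the device names, the origin length, and the 2048-sector offset are illustrative assumptions, not taken from this commit), a constructor table for a persistent-memory cache that reserves the first 1 MiB of the cache device for a userspace-managed header could be loaded like this; note that "start_sector 2048" counts as two optional parameters:

    dmsetup create wc --table "0 209715200 writecache p /dev/vdb /dev/pmem0 4096 2 start_sector 2048"

Here /dev/vdb is the origin device being cached, /dev/pmem0 is the cache device, and 4096 is the cache block size in bytes.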
 Documentation/device-mapper/writecache.txt |  2 ++
 drivers/md/dm-writecache.c                 | 43 +++++++++++++++++++++++++++++--------------
 2 files changed, 31 insertions(+), 14 deletions(-)
diff --git a/Documentation/device-mapper/writecache.txt b/Documentation/device-mapper/writecache.txt
index 4424fa2c67d7..01532b3008ae 100644
--- a/Documentation/device-mapper/writecache.txt
+++ b/Documentation/device-mapper/writecache.txt
@@ -15,6 +15,8 @@ Constructor parameters:
    size)
 5. the number of optional parameters (the parameters with an argument
    count as two)
+	start_sector n		(default: 0)
+		offset from the start of cache device in 512-byte sectors
 	high_watermark n	(default: 50)
 		start writeback when the number of used blocks reach this
 		watermark
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 07ea6a48aac6..87107c995cb5 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -136,6 +136,7 @@ struct dm_writecache {
 	struct dm_target *ti;
 	struct dm_dev *dev;
 	struct dm_dev *ssd_dev;
+	sector_t start_sector;
 	void *memory_map;
 	uint64_t memory_map_size;
 	size_t metadata_sectors;
@@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	}
 
 	dax_read_unlock(id);
+
+	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
 	return 0;
 err3:
 	kvfree(pages);
@@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 static void persistent_memory_release(struct dm_writecache *wc)
 {
 	if (wc->memory_vmapped)
-		vunmap(wc->memory_map);
+		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
 }
 
 static struct page *persistent_memory_page(void *addr)
@@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
 
 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
 {
-	return wc->metadata_sectors +
+	return wc->start_sector + wc->metadata_sectors +
 		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
 }
 
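The mapping in cache_sector() above is plain offset arithmetic: the data area now begins start_sector + metadata_sectors into the cache device, and the entry index is scaled from cache blocks to 512-byte sectors. A minimal standalone C sketch with assumed values (4096-byte blocks, 256 metadata sectors, start_sector 2048; none of these numbers come from the patch):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	uint64_t start_sector = 2048;		/* assumed optional offset */
	uint64_t metadata_sectors = 256;	/* assumed metadata area size */
	unsigned int block_size_bits = 12;	/* assumed 4096-byte cache blocks */
	uint64_t index = 3;			/* example cache entry index */

	/* Each cache block spans 1 << (block_size_bits - SECTOR_SHIFT)
	 * sectors (8 here), so entry 3 starts 24 sectors into the data
	 * area: 2048 + 256 + 24 = sector 2328 on the cache device. */
	uint64_t sector = start_sector + metadata_sectors +
		(index << (block_size_bits - SECTOR_SHIFT));

	printf("entry %llu -> device sector %llu\n",
	       (unsigned long long)index, (unsigned long long)sector);
	return 0;
}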
@@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
 		if (unlikely(region.sector + region.count > wc->metadata_sectors))
 			region.count = wc->metadata_sectors - region.sector;
 
+		region.sector += wc->start_sector;
 		atomic_inc(&endio.count);
 		req.bi_op = REQ_OP_WRITE;
 		req.bi_op_flags = REQ_SYNC;
@@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
 
-	if (WC_MODE_PMEM(wc)) {
-		r = persistent_memory_claim(wc);
-		if (r) {
-			ti->error = "Unable to map persistent memory for cache";
-			goto bad;
-		}
-	}
-
 	/*
 	 * Parse the cache block size
 	 */
@@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	while (opt_params) {
 		string = dm_shift_arg(&as), opt_params--;
-		if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+			unsigned long long start_sector;
+			string = dm_shift_arg(&as), opt_params--;
+			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+				goto invalid_optional;
+			wc->start_sector = start_sector;
+			if (wc->start_sector != start_sector ||
+			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+				goto invalid_optional;
+		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
 			string = dm_shift_arg(&as), opt_params--;
 			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
 				goto invalid_optional;
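A subtlety in the parsing above: wc->start_sector is a sector_t, which can be only 32 bits wide on 32-bit kernels built without large-block-device support, so the parsed unsigned long long is stored into the field and then compared back against the original; if the narrowing conversion lost bits, the value is rejected via invalid_optional. A minimal sketch of that guard in isolation (the 32-bit typedef is an assumption for illustration):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t sector32_t;	/* stand-in for a 32-bit sector_t */

int main(void)
{
	unsigned long long start_sector = 1ULL << 32;	/* does not fit */
	sector32_t stored = (sector32_t)start_sector;	/* narrowing assignment */

	/* Same shape as the check in the patch: if the stored value no
	 * longer equals the parsed value, the narrowing lost bits. */
	if (stored != start_sector)
		printf("rejected: %llu overflows sector_t\n", start_sector);
	return 0;
}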
@@ -2039,12 +2046,20 @@ invalid_optional:
 		goto bad;
 	}
 
-	if (!WC_MODE_PMEM(wc)) {
+	if (WC_MODE_PMEM(wc)) {
+		r = persistent_memory_claim(wc);
+		if (r) {
+			ti->error = "Unable to map persistent memory for cache";
+			goto bad;
+		}
+	} else {
 		struct dm_io_region region;
 		struct dm_io_request req;
 		size_t n_blocks, n_metadata_blocks;
 		uint64_t n_bitmap_bits;
 
+		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
+
 		bio_list_init(&wc->flush_list);
 		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
 		if (IS_ERR(wc->flush_thread)) {
@@ -2097,7 +2112,7 @@ invalid_optional:
 	}
 
 	region.bdev = wc->ssd_dev->bdev;
-	region.sector = 0;
+	region.sector = wc->start_sector;
 	region.count = wc->metadata_sectors;
 	req.bi_op = REQ_OP_READ;
 	req.bi_op_flags = REQ_SYNC;
@@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
 
 static struct target_type writecache_target = {
 	.name			= "writecache",
-	.version		= {1, 0, 0},
+	.version		= {1, 1, 0},
 	.module			= THIS_MODULE,
 	.ctr			= writecache_ctr,
 	.dtr			= writecache_dtr,