 -rw-r--r--  drivers/dax/super.c            |  21
 -rw-r--r--  drivers/md/dm-bufio.c          |  95
 -rw-r--r--  drivers/md/dm-bufio.h          |   9
 -rw-r--r--  drivers/md/dm-cache-target.c   |   4
 -rw-r--r--  drivers/md/dm-crypt.c          |   2
 -rw-r--r--  drivers/md/dm-flakey.c         |   4
 -rw-r--r--  drivers/md/dm-integrity.c      |  42
 -rw-r--r--  drivers/md/dm-ioctl.c          |   2
 -rw-r--r--  drivers/md/dm-linear.c         |  15
 -rw-r--r--  drivers/md/dm-log-writes.c     |  44
 -rw-r--r--  drivers/md/dm-mpath.c          |  15
 -rw-r--r--  drivers/md/dm-rq.c             |  27
 -rw-r--r--  drivers/md/dm-rq.h             |   1
 -rw-r--r--  drivers/md/dm-stripe.c         |  20
 -rw-r--r--  drivers/md/dm-switch.c         |   2
 -rw-r--r--  drivers/md/dm-table.c          |   7
 -rw-r--r--  drivers/md/dm-thin.c           |   2
 -rw-r--r--  drivers/md/dm-verity-target.c  |   2
 -rw-r--r--  drivers/md/dm.c                |  19
 -rw-r--r--  drivers/nvdimm/pmem.c          |   7
 -rw-r--r--  fs/dax.c                       |   4
 -rw-r--r--  include/linux/dax.h            |   5
 -rw-r--r--  include/linux/device-mapper.h  |   7
 23 files changed, 189 insertions, 167 deletions
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 3600ff786646..557b93703532 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -201,8 +201,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
 	if (!dax_dev)
 		return 0;
 
-	if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
+#ifndef CONFIG_ARCH_HAS_PMEM_API
+	if (a == &dev_attr_write_cache.attr)
 		return 0;
+#endif
 	return a->mode;
 }
 
@@ -267,18 +269,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 }
 EXPORT_SYMBOL_GPL(dax_copy_from_iter);
 
-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
-		size_t size)
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+void arch_wb_cache_pmem(void *addr, size_t size);
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
 {
-	if (!dax_alive(dax_dev))
+	if (unlikely(!dax_alive(dax_dev)))
 		return;
 
-	if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
+	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
 		return;
 
-	if (dax_dev->ops->flush)
-		dax_dev->ops->flush(dax_dev, pgoff, addr, size);
+	arch_wb_cache_pmem(addr, size);
 }
+#else
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
+{
+}
+#endif
 EXPORT_SYMBOL_GPL(dax_flush);
 
 void dax_write_cache(struct dax_device *dax_dev, bool wc)
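
With this change the DAX flush path no longer dispatches through a per-driver dax_operations hook: when CONFIG_ARCH_HAS_PMEM_API is set, dax_flush() calls arch_wb_cache_pmem() directly, and otherwise it compiles to a no-op (and the write_cache sysfs attribute is hidden). Callers simply drop the pgoff argument. A minimal sketch of a call site under the new signature -- the function name here is hypothetical, not part of the patch:

    /* Hypothetical caller: write through a kernel mapping, then flush. */
    static void example_write_and_flush(struct dax_device *dax_dev,
                                        void *kaddr, const void *src,
                                        size_t len)
    {
        memcpy(kaddr, src, len);
        dax_flush(dax_dev, kaddr, len); /* no-op unless ARCH_HAS_PMEM_API */
    }
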
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9601225e0ae9..d216a8f7bc22 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -64,6 +64,12 @@
 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
 
 /*
+ * Align buffer writes to this boundary.
+ * Tests show that SSDs have the highest IOPS when using 4k writes.
+ */
+#define DM_BUFIO_WRITE_ALIGN		4096
+
+/*
  * dm_buffer->list_mode
  */
 #define LIST_CLEAN	0
@@ -149,6 +155,10 @@ struct dm_buffer {
 	blk_status_t write_error;
 	unsigned long state;
 	unsigned long last_accessed;
+	unsigned dirty_start;
+	unsigned dirty_end;
+	unsigned write_start;
+	unsigned write_end;
 	struct dm_bufio_client *c;
 	struct list_head write_list;
 	struct bio bio;
@@ -560,7 +570,7 @@ static void dmio_complete(unsigned long error, void *context)
 }
 
 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
-		     unsigned n_sectors, bio_end_io_t *end_io)
+		     unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
 {
 	int r;
 	struct dm_io_request io_req = {
@@ -578,10 +588,10 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 
 	if (b->data_mode != DATA_MODE_VMALLOC) {
 		io_req.mem.type = DM_IO_KMEM;
-		io_req.mem.ptr.addr = b->data;
+		io_req.mem.ptr.addr = (char *)b->data + offset;
 	} else {
 		io_req.mem.type = DM_IO_VMA;
-		io_req.mem.ptr.vma = b->data;
+		io_req.mem.ptr.vma = (char *)b->data + offset;
 	}
 
 	b->bio.bi_end_io = end_io;
@@ -609,10 +619,10 @@ static void inline_endio(struct bio *bio)
 }
 
 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
-			   unsigned n_sectors, bio_end_io_t *end_io)
+			   unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
 {
 	char *ptr;
-	int len;
+	unsigned len;
 
 	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
 	b->bio.bi_iter.bi_sector = sector;
@@ -625,29 +635,20 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
 	b->bio.bi_private = end_io;
 	bio_set_op_attrs(&b->bio, rw, 0);
 
-	/*
-	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
-	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
-	 */
-	ptr = b->data;
+	ptr = (char *)b->data + offset;
 	len = n_sectors << SECTOR_SHIFT;
 
-	if (len >= PAGE_SIZE)
-		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
-	else
-		BUG_ON((unsigned long)ptr & (len - 1));
-
 	do {
-		if (!bio_add_page(&b->bio, virt_to_page(ptr),
-				  len < PAGE_SIZE ? len : PAGE_SIZE,
+		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
+		if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step,
 				  offset_in_page(ptr))) {
 			BUG_ON(b->c->block_size <= PAGE_SIZE);
-			use_dmio(b, rw, sector, n_sectors, end_io);
+			use_dmio(b, rw, sector, n_sectors, offset, end_io);
 			return;
 		}
 
-		len -= PAGE_SIZE;
-		ptr += PAGE_SIZE;
+		len -= this_step;
+		ptr += this_step;
 	} while (len > 0);
 
 	submit_bio(&b->bio);
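
Because a write may now start at an arbitrary byte offset inside the buffer, the first page of the bio can be partially used; each loop iteration therefore adds min(bytes left in the current page, bytes remaining) instead of assuming whole aligned pages. A userspace model of that stepping (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    static unsigned offset_in_page(uintptr_t p)
    {
        return p & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t ptr = 0x10000 + 512; /* write starts 512 bytes into a page */
        unsigned len = 8192, steps = 0;

        do {
            unsigned this_step = PAGE_SIZE - offset_in_page(ptr);
            if (this_step > len)
                this_step = len;
            len -= this_step;
            ptr += this_step;
            steps++;
        } while (len > 0);

        assert(steps == 3); /* segments of 3584, 4096 and 512 bytes */
        return 0;
    }
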
@@ -657,18 +658,33 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
 {
 	unsigned n_sectors;
 	sector_t sector;
-
-	if (rw == WRITE && b->c->write_callback)
-		b->c->write_callback(b);
+	unsigned offset, end;
 
 	sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
-	n_sectors = 1 << b->c->sectors_per_block_bits;
+
+	if (rw != WRITE) {
+		n_sectors = 1 << b->c->sectors_per_block_bits;
+		offset = 0;
+	} else {
+		if (b->c->write_callback)
+			b->c->write_callback(b);
+		offset = b->write_start;
+		end = b->write_end;
+		offset &= -DM_BUFIO_WRITE_ALIGN;
+		end += DM_BUFIO_WRITE_ALIGN - 1;
+		end &= -DM_BUFIO_WRITE_ALIGN;
+		if (unlikely(end > b->c->block_size))
+			end = b->c->block_size;
+
+		sector += offset >> SECTOR_SHIFT;
+		n_sectors = (end - offset) >> SECTOR_SHIFT;
+	}
 
 	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
 	    b->data_mode != DATA_MODE_VMALLOC)
-		use_inline_bio(b, rw, sector, n_sectors, end_io);
+		use_inline_bio(b, rw, sector, n_sectors, offset, end_io);
 	else
-		use_dmio(b, rw, sector, n_sectors, end_io);
+		use_dmio(b, rw, sector, n_sectors, offset, end_io);
 }
 
 /*----------------------------------------------------------------
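
DM_BUFIO_WRITE_ALIGN is a power of two, so submit_io() can round the dirty byte range outward with mask arithmetic: x &= -ALIGN clears the low bits (round down) and (x + ALIGN - 1) & -ALIGN rounds up, clamped to the block size. A standalone check of that arithmetic (names are illustrative):

    #include <assert.h>

    #define WRITE_ALIGN 4096u /* stands in for DM_BUFIO_WRITE_ALIGN */

    int main(void)
    {
        unsigned start = 5000, end = 6000, block_size = 65536;

        start &= -WRITE_ALIGN;          /* round down */
        end += WRITE_ALIGN - 1;
        end &= -WRITE_ALIGN;            /* round up */
        if (end > block_size)
            end = block_size;

        assert(start == 4096 && end == 8192); /* one aligned 4k chunk */
        return 0;
    }
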
@@ -720,6 +736,9 @@ static void __write_dirty_buffer(struct dm_buffer *b,
 	clear_bit(B_DIRTY, &b->state);
 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
+	b->write_start = b->dirty_start;
+	b->write_end = b->dirty_end;
+
 	if (!write_list)
 		submit_io(b, WRITE, write_endio);
 	else
@@ -1221,19 +1240,37 @@ void dm_bufio_release(struct dm_buffer *b)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_release);
 
-void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
+void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+					unsigned start, unsigned end)
 {
 	struct dm_bufio_client *c = b->c;
 
+	BUG_ON(start >= end);
+	BUG_ON(end > b->c->block_size);
+
 	dm_bufio_lock(c);
 
 	BUG_ON(test_bit(B_READING, &b->state));
 
-	if (!test_and_set_bit(B_DIRTY, &b->state))
+	if (!test_and_set_bit(B_DIRTY, &b->state)) {
+		b->dirty_start = start;
+		b->dirty_end = end;
 		__relink_lru(b, LIST_DIRTY);
+	} else {
+		if (start < b->dirty_start)
+			b->dirty_start = start;
+		if (end > b->dirty_end)
+			b->dirty_end = end;
+	}
 
 	dm_bufio_unlock(c);
 }
+EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
+
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
+{
+	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
+}
 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
 
 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
@@ -1398,6 +1435,8 @@ retry:
 			wait_on_bit_io(&b->state, B_WRITING,
 				       TASK_UNINTERRUPTIBLE);
 			set_bit(B_DIRTY, &b->state);
+			b->dirty_start = 0;
+			b->dirty_end = c->block_size;
 			__unlink_buffer(b);
 			__link_buffer(b, new_block, LIST_DIRTY);
 		} else {
diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h
index b6d8f53ec15b..be732d3f8611 100644
--- a/drivers/md/dm-bufio.h
+++ b/drivers/md/dm-bufio.h
@@ -94,6 +94,15 @@ void dm_bufio_release(struct dm_buffer *b);
 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
 
 /*
+ * Mark a part of the buffer dirty.
+ *
+ * The specified part of the buffer is scheduled to be written. dm-bufio may
+ * write the specified part of the buffer or it may write a larger superset.
+ */
+void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+					unsigned start, unsigned end);
+
+/*
  * Initiate writing of dirty buffers, without waiting for completion.
  */
 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
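
A typical client now reads a block, patches a byte range in place and marks only that range dirty; dm-bufio merges overlapping ranges and rounds the eventual write out to the 4k boundary. A minimal sketch of such a caller (hypothetical helper, abbreviated error handling):

    static int update_bytes(struct dm_bufio_client *c, sector_t block,
                            unsigned offset, const void *src, unsigned len)
    {
        struct dm_buffer *b;
        void *data = dm_bufio_read(c, block, &b);

        if (IS_ERR(data))
            return PTR_ERR(data);

        memcpy((char *)data + offset, src, len);
        dm_bufio_mark_partial_buffer_dirty(b, offset, offset + len);
        dm_bufio_release(b);
        return 0;
    }

This mirrors how dm-integrity's TAG_WRITE path uses the new call below.
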
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index dcac25c2be7a..8785134c9f1f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2306,7 +2306,7 @@ static void init_features(struct cache_features *cf)
 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 			  char **error)
 {
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 2, "Invalid number of cache feature arguments"},
 	};
 
@@ -2348,7 +2348,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
 			char **error)
 {
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 1024, "Invalid number of policy arguments"},
 	};
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 54aef8ed97db..a55ffd4f5933 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2529,7 +2529,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct crypt_config *cc = ti->private;
 	struct dm_arg_set as;
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 6, "Invalid number of feature args"},
 	};
 	unsigned int opt_params, val;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 7146c2d9762d..b82cb1ab1eaa 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -51,7 +51,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 	unsigned argc;
 	const char *arg_name;
 
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 6, "Invalid number of feature args"},
 		{1, UINT_MAX, "Invalid corrupt bio byte"},
 		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
@@ -178,7 +178,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
  */
 static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, UINT_MAX, "Invalid up interval"},
 		{0, UINT_MAX, "Invalid down interval"},
 	};
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 27c0f223f8ea..096fe9b66c50 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -225,6 +225,8 @@ struct dm_integrity_c {
 	struct alg_spec internal_hash_alg;
 	struct alg_spec journal_crypt_alg;
 	struct alg_spec journal_mac_alg;
+
+	atomic64_t number_of_mismatches;
 };
 
 struct dm_integrity_range {
@@ -298,7 +300,7 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
 /*
  * DM Integrity profile, protection is performed layer above (dm-crypt)
  */
-static struct blk_integrity_profile dm_integrity_profile = {
+static const struct blk_integrity_profile dm_integrity_profile = {
 	.name			= "DM-DIF-EXT-TAG",
 	.generate_fn		= NULL,
 	.verify_fn		= NULL,
@@ -310,6 +312,8 @@ static void dm_integrity_dtr(struct dm_target *ti);
 
 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
 {
+	if (err == -EILSEQ)
+		atomic64_inc(&ic->number_of_mismatches);
 	if (!cmpxchg(&ic->failed, 0, err))
 		DMERR("Error on %s: %d", msg, err);
 }
@@ -770,13 +774,13 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
 	unsigned i;
 
 	io_comp.ic = ic;
-	io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp);
+	init_completion(&io_comp.comp);
 
 	if (commit_start + commit_sections <= ic->journal_sections) {
 		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
 		if (ic->journal_io) {
 			crypt_comp_1.ic = ic;
-			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
+			init_completion(&crypt_comp_1.comp);
 			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
 			wait_for_completion_io(&crypt_comp_1.comp);
@@ -792,18 +796,18 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
 		to_end = ic->journal_sections - commit_start;
 		if (ic->journal_io) {
 			crypt_comp_1.ic = ic;
-			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
+			init_completion(&crypt_comp_1.comp);
 			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
 			if (try_wait_for_completion(&crypt_comp_1.comp)) {
 				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
-				crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
+				reinit_completion(&crypt_comp_1.comp);
 				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
 				wait_for_completion_io(&crypt_comp_1.comp);
 			} else {
 				crypt_comp_2.ic = ic;
-				crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp);
+				init_completion(&crypt_comp_2.comp);
 				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
 				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
 				wait_for_completion_io(&crypt_comp_1.comp);
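
The conversions above replace COMPLETION_INITIALIZER_ONSTACK() reassignment with init_completion()/reinit_completion(). Reassigning the whole struct re-creates the completion's wait-queue head (and its lock) in place, which is unsafe once other contexts hold a reference; the supported pattern is to initialize once and only reset the done counter on reuse. A sketch of the pattern (the asynchronous completer is elided):

    struct completion done;

    init_completion(&done);      /* set up waitqueue + lockdep map once */
    /* ... submit async work that calls complete(&done) ... */
    wait_for_completion(&done);

    reinit_completion(&done);    /* reset ->done only; keep the waitqueue */
    /* ... submit again, wait again ... */
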
@@ -1041,7 +1045,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, unsigned *metadata_offset, unsigned total_size, int op)
 			memcpy(tag, dp, to_copy);
 		} else if (op == TAG_WRITE) {
 			memcpy(dp, tag, to_copy);
-			dm_bufio_mark_buffer_dirty(b);
+			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
 		} else {
 			/* e.g.: op == TAG_CMP */
 			if (unlikely(memcmp(dp, tag, to_copy))) {
@@ -1275,6 +1279,7 @@ again:
 				DMERR("Checksum failed at sector 0x%llx",
 				      (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
 				r = -EILSEQ;
+				atomic64_inc(&ic->number_of_mismatches);
 			}
 			if (likely(checksums != checksums_onstack))
 				kfree(checksums);
@@ -1676,7 +1681,7 @@ sleep:
 	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
 
 	if (need_sync_io) {
-		read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp);
+		init_completion(&read_comp);
 		dio->completion = &read_comp;
 	} else
 		dio->completion = NULL;
@@ -1700,7 +1705,11 @@ sleep:
 
 	if (need_sync_io) {
 		wait_for_completion_io(&read_comp);
-		integrity_metadata(&dio->work);
+		if (likely(!bio->bi_status))
+			integrity_metadata(&dio->work);
+		else
+			dec_in_flight(dio);
+
 	} else {
 		INIT_WORK(&dio->work, integrity_metadata);
 		queue_work(ic->metadata_wq, &dio->work);
@@ -1834,7 +1843,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
 
 	comp.ic = ic;
 	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
-	comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
+	init_completion(&comp.comp);
 
 	i = write_start;
 	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
@@ -2061,7 +2070,7 @@ static void replay_journal(struct dm_integrity_c *ic)
 	if (ic->journal_io) {
 		struct journal_completion crypt_comp;
 		crypt_comp.ic = ic;
-		crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp);
+		init_completion(&crypt_comp.comp);
 		crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
 		encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
 		wait_for_completion(&crypt_comp.comp);
@@ -2233,7 +2242,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
 
 	switch (type) {
 	case STATUSTYPE_INFO:
-		result[0] = '\0';
+		DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches));
 		break;
 
 	case STATUSTYPE_TABLE: {
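
Since the mismatch counter is now emitted for STATUSTYPE_INFO, userspace can poll it instead of reading an empty status field; this is also why the target version is bumped to 1.1.0 further down. A hypothetical session (device name and sizes are illustrative only):

    # dmsetup status my-integrity
    0 1953792 integrity 0     <- cumulative checksum mismatches
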
@@ -2634,7 +2643,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 			memset(iv, 0x00, ivsize);
 
 			skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
-			comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
+			init_completion(&comp.comp);
 			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
 			if (do_crypt(true, req, &comp))
 				wait_for_completion(&comp.comp);
@@ -2691,7 +2700,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 
 				sg_init_one(&sg, crypt_data, crypt_len);
 				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
-				comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
+				init_completion(&comp.comp);
 				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
 				if (do_crypt(true, req, &comp))
 					wait_for_completion(&comp.comp);
@@ -2778,7 +2787,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	int r;
 	unsigned extra_args;
 	struct dm_arg_set as;
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 9, "Invalid number of feature args"},
 	};
 	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
@@ -2806,6 +2815,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	bio_list_init(&ic->flush_bio_list);
 	init_waitqueue_head(&ic->copy_to_journal_wait);
 	init_completion(&ic->crypto_backoff);
+	atomic64_set(&ic->number_of_mismatches, 0);
 
 	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
 	if (r) {
@@ -3202,7 +3212,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 
 static struct target_type integrity_target = {
 	.name			= "integrity",
-	.version		= {1, 0, 0},
+	.version		= {1, 1, 0},
 	.module			= THIS_MODULE,
 	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
 	.ctr			= dm_integrity_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e06f0ef7d2ec..8756a6850431 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1629,7 +1629,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
  *---------------------------------------------------------------*/
 static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
 {
-	static struct {
+	static const struct {
 		int cmd;
 		int flags;
 		ioctl_fn fn;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 405eca206d67..d5f8eff7c11d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
-static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
-		size_t size)
-{
-	struct linear_c *lc = ti->private;
-	struct block_device *bdev = lc->dev->bdev;
-	struct dax_device *dax_dev = lc->dev->dax_dev;
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
-	dev_sector = linear_map_sector(ti, sector);
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
-		return;
-	dax_flush(dax_dev, pgoff, addr, size);
-}
-
 static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 4, 0},
@@ -212,7 +198,6 @@ static struct target_type linear_target = {
 	.iterate_devices = linear_iterate_devices,
 	.direct_access = linear_dax_direct_access,
 	.dax_copy_from_iter = linear_dax_copy_from_iter,
-	.dax_flush = linear_dax_flush,
 };
 
 int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 534a254eb977..8b80a9ce9ea9 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -100,6 +100,7 @@ struct log_writes_c {
 	struct dm_dev *logdev;
 	u64 logged_entries;
 	u32 sectorsize;
+	u32 sectorshift;
 	atomic_t io_blocks;
 	atomic_t pending_blocks;
 	sector_t next_sector;
@@ -128,6 +129,18 @@ struct per_bio_data {
 	struct pending_block *block;
 };
 
+static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
+					  sector_t sectors)
+{
+	return sectors >> (lc->sectorshift - SECTOR_SHIFT);
+}
+
+static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
+					  sector_t sectors)
+{
+	return sectors << (lc->sectorshift - SECTOR_SHIFT);
+}
+
 static void put_pending_block(struct log_writes_c *lc)
 {
 	if (atomic_dec_and_test(&lc->pending_blocks)) {
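
Log entries are now tracked in units of the log device's logical sectors, while bi_sector stays in 512-byte units; with a 4096-byte logical block, sectorshift is 12 and the two helpers shift by sectorshift - SECTOR_SHIFT = 3. A userspace model of the conversions (assumes a 4k-sector log device):

    #include <assert.h>

    #define SECTOR_SHIFT 9 /* 512-byte units used by bi_sector */

    int main(void)
    {
        unsigned sectorshift = 12; /* log2(4096) */

        /* 8 bio sectors (4096 bytes) == 1 device sector */
        assert((8u >> (sectorshift - SECTOR_SHIFT)) == 1); /* bio_to_dev */
        assert((1u << (sectorshift - SECTOR_SHIFT)) == 8); /* dev_to_bio */

        /* the super block fills device sector 0, so logging starts at
         * bio sector sectorsize >> SECTOR_SHIFT == 8 */
        assert((4096u >> SECTOR_SHIFT) == 8);
        return 0;
    }
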
@@ -253,7 +266,7 @@ static int log_one_block(struct log_writes_c *lc,
 
 	if (!block->vec_cnt)
 		goto out;
-	sector++;
+	sector += dev_to_bio_sectors(lc, 1);
 
 	atomic_inc(&lc->io_blocks);
 	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
@@ -354,10 +367,9 @@ static int log_writes_kthread(void *arg)
 			goto next;
 
 		sector = lc->next_sector;
-		if (block->flags & LOG_DISCARD_FLAG)
-			lc->next_sector++;
-		else
-			lc->next_sector += block->nr_sectors + 1;
+		if (!(block->flags & LOG_DISCARD_FLAG))
+			lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
+		lc->next_sector += dev_to_bio_sectors(lc, 1);
 
 		/*
 		 * Apparently the size of the device may not be known
@@ -399,7 +411,7 @@ next:
 		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
-			    !atomic_read(&lc->pending_blocks))
+			    list_empty(&lc->logging_blocks))
 				schedule();
 			__set_current_state(TASK_RUNNING);
 		}
@@ -435,7 +447,6 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	INIT_LIST_HEAD(&lc->unflushed_blocks);
 	INIT_LIST_HEAD(&lc->logging_blocks);
 	init_waitqueue_head(&lc->wait);
-	lc->sectorsize = 1 << SECTOR_SHIFT;
 	atomic_set(&lc->io_blocks, 0);
 	atomic_set(&lc->pending_blocks, 0);
 
@@ -455,6 +466,8 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
+	lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
+	lc->sectorshift = ilog2(lc->sectorsize);
 	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
 	if (IS_ERR(lc->log_kthread)) {
 		ret = PTR_ERR(lc->log_kthread);
@@ -464,8 +477,12 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
-	/* We put the super at sector 0, start logging at sector 1 */
-	lc->next_sector = 1;
+	/*
+	 * next_sector is in 512b sectors to correspond to what bi_sector expects.
+	 * The super starts at sector 0, and the next_sector is the next logical
+	 * one based on the sectorsize of the device.
+	 */
+	lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
 	lc->logging_enabled = true;
 	lc->end_sector = logdev_last_sector(lc);
 	lc->device_supports_discard = true;
@@ -599,8 +616,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 	if (discard_bio)
 		block->flags |= LOG_DISCARD_FLAG;
 
-	block->sector = bio->bi_iter.bi_sector;
-	block->nr_sectors = bio_sectors(bio);
+	block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
+	block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
 
 	/* We don't need the data, just submit */
 	if (discard_bio) {
@@ -767,9 +784,12 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	if (!q || !blk_queue_discard(q)) {
 		lc->device_supports_discard = false;
-		limits->discard_granularity = 1 << SECTOR_SHIFT;
+		limits->discard_granularity = lc->sectorsize;
 		limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
 	}
+	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
+	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
+	limits->io_min = limits->physical_block_size;
 }
 
 static struct target_type log_writes_target = {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 96aedaac2c64..11f273d2f018 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -632,6 +632,10 @@ static void process_queued_bios(struct work_struct *work)
 		case DM_MAPIO_REMAPPED:
 			generic_make_request(bio);
 			break;
+		case 0:
+			break;
+		default:
+			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 		}
 	}
 	blk_finish_plug(&plug);
@@ -698,7 +702,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 	struct path_selector_type *pst;
 	unsigned ps_argc;
 
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 1024, "invalid number of path selector args"},
 	};
 
@@ -822,7 +826,7 @@ retain:
 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 						   struct multipath *m)
 {
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{1, 1024, "invalid number of paths"},
 		{0, 1024, "invalid number of selector args"}
 	};
@@ -898,7 +902,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	int ret;
 	struct dm_target *ti = m->ti;
 
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 1024, "invalid number of hardware handler args"},
 	};
 
@@ -950,7 +954,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 	struct dm_target *ti = m->ti;
 	const char *arg_name;
 
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 8, "invalid number of feature args"},
 		{1, 50, "pg_init_retries must be between 1 and 50"},
 		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
@@ -1019,7 +1023,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
 	/* target arguments */
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 1024, "invalid number of priority groups"},
 		{0, 1024, "invalid initial priority group number"},
 	};
@@ -1379,6 +1383,7 @@ static void pg_init_done(void *data, int errors)
 	case SCSI_DH_RETRY:
 		/* Wait before retrying. */
 		delay_retry = 1;
+		/* fall through */
 	case SCSI_DH_IMM_RETRY:
 	case SCSI_DH_RES_TEMP_UNAVAIL:
 		if (pg_init_limit_reached(m, pgpath))
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index c6ebc5b1e00e..eadfcfd106ff 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -117,9 +117,9 @@ static void end_clone_bio(struct bio *clone)
 	struct dm_rq_clone_bio_info *info =
 		container_of(clone, struct dm_rq_clone_bio_info, clone);
 	struct dm_rq_target_io *tio = info->tio;
-	struct bio *bio = info->orig;
 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 	blk_status_t error = clone->bi_status;
+	bool is_last = !clone->bi_next;
 
 	bio_put(clone);
 
@@ -137,28 +137,23 @@ static void end_clone_bio(struct bio *clone)
 		 * when the request is completed.
 		 */
 		tio->error = error;
-		return;
+		goto exit;
 	}
 
 	/*
 	 * I/O for the bio successfully completed.
 	 * Notice the data completion to the upper layer.
 	 */
-
-	/*
-	 * bios are processed from the head of the list.
-	 * So the completing bio should always be rq->bio.
-	 * If it's not, something wrong is happening.
-	 */
-	if (tio->orig->bio != bio)
-		DMERR("bio completion is going in the middle of the request");
+	tio->completed += nr_bytes;
 
 	/*
 	 * Update the original request.
 	 * Do not use blk_end_request() here, because it may complete
 	 * the original request before the clone, and break the ordering.
	 */
-	blk_update_request(tio->orig, BLK_STS_OK, nr_bytes);
+	if (is_last)
+ exit:
+		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
 }
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
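
The old code assumed clone bios complete in submission order and pushed each bio's bytes straight to blk_update_request(); the rewrite accumulates bytes in tio->completed and only the last clone bio (is_last, i.e. no bi_next) -- or an error -- reports the running total upstream. A userspace model of the accounting (nothing here is kernel API):

    #include <assert.h>
    #include <stdbool.h>

    struct tio_model {
        unsigned completed; /* bytes accumulated so far */
        unsigned reported;  /* what the original request has seen */
    };

    static void end_clone(struct tio_model *tio, unsigned nr_bytes, bool is_last)
    {
        tio->completed += nr_bytes;
        if (is_last)
            tio->reported = tio->completed; /* blk_update_request(...) */
    }

    int main(void)
    {
        struct tio_model tio = { 0, 0 };

        end_clone(&tio, 4096, false); /* middle bios only accumulate */
        end_clone(&tio, 4096, false);
        assert(tio.reported == 0);
        end_clone(&tio, 4096, true);  /* last bio reports the whole range */
        assert(tio.reported == 12288);
        return 0;
    }
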
@@ -237,14 +232,14 @@ static void dm_end_request(struct request *clone, blk_status_t error)
 /*
  * Requeue the original request of a clone.
  */
-static void dm_old_requeue_request(struct request *rq)
+static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, rq);
-	blk_run_queue_async(q);
+	blk_delay_queue(q, delay_ms);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -270,6 +265,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
 	int rw = rq_data_dir(rq);
+	unsigned long delay_ms = delay_requeue ? 100 : 0;
 
 	rq_end_stats(md, rq);
 	if (tio->clone) {
@@ -278,9 +274,9 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 	}
 
 	if (!rq->q->mq_ops)
-		dm_old_requeue_request(rq);
+		dm_old_requeue_request(rq, delay_ms);
 	else
-		dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0);
+		dm_mq_delay_requeue_request(rq, delay_ms);
 
 	rq_completed(md, rw, false);
 }
@@ -455,6 +451,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 	tio->clone = NULL;
 	tio->orig = rq;
 	tio->error = 0;
+	tio->completed = 0;
 	/*
 	 * Avoid initializing info for blk-mq; it passes
 	 * target-specific data through info.ptr
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 9813922e4fe5..f43c45460aac 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -29,6 +29,7 @@ struct dm_rq_target_io {
 	struct dm_stats_aux stats_aux;
 	unsigned long duration_jiffies;
 	unsigned n_sectors;
+	unsigned completed;
 };
 
 /*
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index ab50d7c4377f..b5e892149c54 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
-static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
-		size_t size)
-{
-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-	struct stripe_c *sc = ti->private;
-	struct dax_device *dax_dev;
-	struct block_device *bdev;
-	uint32_t stripe;
-
-	stripe_map_sector(sc, sector, &stripe, &dev_sector);
-	dev_sector += sc->stripe[stripe].physical_start;
-	dax_dev = sc->stripe[stripe].dev->dax_dev;
-	bdev = sc->stripe[stripe].dev->bdev;
-
-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
-		return;
-	dax_flush(dax_dev, pgoff, addr, size);
-}
-
 /*
  * Stripe status:
  *
@@ -489,7 +470,6 @@ static struct target_type stripe_target = {
 	.io_hints = stripe_io_hints,
 	.direct_access = stripe_dax_direct_access,
 	.dax_copy_from_iter = stripe_dax_copy_from_iter,
-	.dax_flush = stripe_dax_flush,
 };
 
 int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 2dcea4c56f37..4c8de1ff78ca 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -251,7 +251,7 @@ static void switch_dtr(struct dm_target *ti)
  */
 static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
 		{1, UINT_MAX, "Invalid region size"},
 		{0, 0, "Invalid number of optional args"},
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 28a4071cdf85..ef7b8f201f73 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -806,7 +806,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 /*
  * Target argument parsing helpers.
  */
-static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+static int validate_next_arg(const struct dm_arg *arg,
+			     struct dm_arg_set *arg_set,
 			     unsigned *value, char **error, unsigned grouped)
 {
 	const char *arg_str = dm_shift_arg(arg_set);
@@ -824,14 +825,14 @@ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
 	return 0;
 }
 
-int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 		unsigned *value, char **error)
 {
 	return validate_next_arg(arg, arg_set, value, error, 0);
 }
 EXPORT_SYMBOL(dm_read_arg);
 
-int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 		      unsigned *value, char **error)
 {
 	return validate_next_arg(arg, arg_set, value, error, 1);
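
Making these parsers take const struct dm_arg * is what lets every target above declare its bounds table static const, moving it to read-only memory. The calling pattern is unchanged; a sketch (abbreviated error handling):

    static int example_parse(struct dm_arg_set *as, char **error)
    {
        static const struct dm_arg _args[] = {
            {0, 6, "Invalid number of feature args"},
        };
        unsigned argc;

        /* validate the next token against [min, max], else set *error */
        return dm_read_arg_group(_args, as, &argc, error);
    }
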
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 69d88aee3055..1e25705209c2 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3041,7 +3041,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 	unsigned argc;
 	const char *arg_name;
 
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, 4, "Invalid number of pool feature arguments"},
 	};
 
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 1c5b6185c79d..bda3caca23ca 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -839,7 +839,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
 	struct dm_target *ti = v->ti;
 	const char *arg_name;
 
-	static struct dm_arg _args[] = {
+	static const struct dm_arg _args[] = {
 		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
 	};
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 04ae795e8a5f..6e54145969c5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -987,24 +987,6 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
-static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
-		size_t size)
-{
-	struct mapped_device *md = dax_get_private(dax_dev);
-	sector_t sector = pgoff * PAGE_SECTORS;
-	struct dm_target *ti;
-	int srcu_idx;
-
-	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
-	if (!ti)
-		goto out;
-	if (ti->type->dax_flush)
-		ti->type->dax_flush(ti, pgoff, addr, size);
-out:
-	dm_put_live_table(md, srcu_idx);
-}
-
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH.
@@ -2992,7 +2974,6 @@ static const struct block_device_operations dm_blk_dops = {
 static const struct dax_operations dm_dax_ops = {
 	.direct_access = dm_dax_direct_access,
 	.copy_from_iter = dm_dax_copy_from_iter,
-	.flush = dm_dax_flush,
 };
 
 /*
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index e9aa453da50c..39dfd7affa31 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -262,16 +262,9 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 	return copy_from_iter_flushcache(addr, bytes, i);
 }
 
-static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
-		void *addr, size_t size)
-{
-	arch_wb_cache_pmem(addr, size);
-}
-
 static const struct dax_operations pmem_dax_ops = {
 	.direct_access = pmem_dax_direct_access,
 	.copy_from_iter = pmem_copy_from_iter,
-	.flush = pmem_dax_flush,
 };
 
 static const struct attribute_group *pmem_attribute_groups[] = {
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -734,7 +734,7 @@ static int dax_writeback_one(struct block_device *bdev,
 	}
 
 	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
-	dax_flush(dax_dev, pgoff, kaddr, size);
+	dax_flush(dax_dev, kaddr, size);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
 	 * cannot be new dirty data in the pfn after the flush has completed as
@@ -929,7 +929,7 @@ int __dax_zero_page_range(struct block_device *bdev,
 			return rc;
 		}
 		memset(kaddr + offset, 0, size);
-		dax_flush(dax_dev, pgoff, kaddr + offset, size);
+		dax_flush(dax_dev, kaddr + offset, size);
 		dax_read_unlock(id);
 	}
 	return 0;
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 46cad1d0f129..122197124b9d 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -19,8 +19,6 @@ struct dax_operations {
 	/* copy_from_iter: required operation for fs-dax direct-i/o */
 	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
 			struct iov_iter *);
-	/* flush: optional driver-specific cache management after writes */
-	void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
 };
 
 extern struct attribute_group dax_attribute_group;
@@ -90,8 +88,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		void **kaddr, pfn_t *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
-		size_t size);
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 void dax_write_cache(struct dax_device *dax_dev, bool wc);
 bool dax_write_cache_enabled(struct dax_device *dax_dev);
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 4f2b3b2076c4..a5538433c927 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -134,8 +134,6 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
 typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i);
-typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
-		size_t size);
 #define PAGE_SECTORS (PAGE_SIZE / 512)
 
 void dm_error(const char *message);
@@ -186,7 +184,6 @@ struct target_type {
 	dm_io_hints_fn io_hints;
 	dm_dax_direct_access_fn direct_access;
 	dm_dax_copy_from_iter_fn dax_copy_from_iter;
-	dm_dax_flush_fn dax_flush;
 
 	/* For internal device-mapper use. */
 	struct list_head list;
@@ -387,7 +384,7 @@ struct dm_arg {
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
-int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);
 
 /*
@@ -395,7 +392,7 @@ int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
-int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);
 
 /*