-rw-r--r--  drivers/nvdimm/btt.c       129
-rw-r--r--  drivers/nvdimm/btt.h         2
-rw-r--r--  drivers/nvdimm/btt_devs.c    3
-rw-r--r--  drivers/nvdimm/core.c       37
-rw-r--r--  drivers/nvdimm/nd.h          1
5 files changed, 154 insertions, 18 deletions
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 7ae38aac2c25..18a2463c2300 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -837,6 +837,11 @@ static int btt_meta_init(struct btt *btt)
 	return ret;
 }
 
+static u32 btt_meta_size(struct btt *btt)
+{
+	return btt->lbasize - btt->sector_size;
+}
+
 /*
  * This function calculates the arena in which the given LBA lies
  * by doing a linear walk. This is acceptable since we expect only
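
Aside (not part of the patch): a worked example of the arithmetic btt_meta_size() encodes. The per-block metadata is simply the internal LBA size minus the sector size exposed to the block layer; the geometries below are illustrative and assume the lbasize values this series adds to btt_lbasize_supported.

#include <stdio.h>

/* userspace mirror of btt_meta_size(): metadata = lbasize - sector_size */
static unsigned int meta_size(unsigned int lbasize, unsigned int sector_size)
{
	return lbasize - sector_size;
}

int main(void)
{
	/* 4104-byte internal blocks exposing 4096-byte sectors leave 8 bytes
	 * of metadata per block -- the size of one T10 DIF tuple. */
	printf("%u\n", meta_size(4104, 4096));	/* 8 */
	printf("%u\n", meta_size(528, 512));	/* 16 */
	return 0;
}
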
@@ -921,8 +926,63 @@ static void zero_fill_data(struct page *page, unsigned int off, u32 len)
 	kunmap_atomic(mem);
 }
 
-static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
-		sector_t sector, unsigned int len)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+			struct arena_info *arena, u32 postmap, int rw)
+{
+	unsigned int len = btt_meta_size(btt);
+	u64 meta_nsoff;
+	int ret = 0;
+
+	if (bip == NULL)
+		return 0;
+
+	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
+
+	while (len) {
+		unsigned int cur_len;
+		struct bio_vec bv;
+		void *mem;
+
+		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+		/*
+		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
+		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
+		 * can use those directly
+		 */
+
+		cur_len = min(len, bv.bv_len);
+		mem = kmap_atomic(bv.bv_page);
+		if (rw)
+			ret = arena_write_bytes(arena, meta_nsoff,
+					mem + bv.bv_offset, cur_len);
+		else
+			ret = arena_read_bytes(arena, meta_nsoff,
+					mem + bv.bv_offset, cur_len);
+
+		kunmap_atomic(mem);
+		if (ret)
+			return ret;
+
+		len -= cur_len;
+		meta_nsoff += cur_len;
+		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
+	}
+
+	return ret;
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+			struct arena_info *arena, u32 postmap, int rw)
+{
+	return 0;
+}
+#endif
+
+static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
+			struct page *page, unsigned int off, sector_t sector,
+			unsigned int len)
 {
 	int ret = 0;
 	int t_flag, e_flag;
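
Aside (not part of the patch): btt_rw_integrity() keeps the integrity buffer directly behind its data block in the backing namespace -- the data occupies the first sector_size bytes of the internal block selected by postmap, and the remaining lbasize - sector_size bytes hold the metadata. Below is a minimal userspace model of that offset calculation; the struct and the assumption that to_namespace_offset() resolves to dataoff + postmap * internal_lbasize are illustrative only, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* toy stand-in for arena_info; field names are hypothetical */
struct toy_arena {
	uint64_t dataoff;		/* start of the arena's data area */
	uint32_t internal_lbasize;	/* bytes reserved per internal block */
	uint32_t sector_size;		/* bytes exposed per sector */
};

static uint64_t data_nsoff(const struct toy_arena *a, uint32_t postmap)
{
	return a->dataoff + (uint64_t)postmap * a->internal_lbasize;
}

static uint64_t meta_nsoff(const struct toy_arena *a, uint32_t postmap)
{
	/* metadata starts right after the data, as in btt_rw_integrity() */
	return data_nsoff(a, postmap) + a->sector_size;
}

int main(void)
{
	struct toy_arena a = {
		.dataoff = 0x100000, .internal_lbasize = 4160, .sector_size = 4096,
	};

	printf("block 3: data at %#llx, metadata at %#llx\n",
			(unsigned long long)data_nsoff(&a, 3),
			(unsigned long long)meta_nsoff(&a, 3));
	return 0;
}
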
@@ -984,6 +1044,12 @@ static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
 		if (ret)
 			goto out_rtt;
 
+		if (bip) {
+			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
+			if (ret)
+				goto out_rtt;
+		}
+
 		arena->rtt[lane] = RTT_INVALID;
 		nd_region_release_lane(btt->nd_region, lane);
 
@@ -1001,8 +1067,9 @@ static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
 	return ret;
 }
 
-static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
-		unsigned int off, unsigned int len)
+static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
+			sector_t sector, struct page *page, unsigned int off,
+			unsigned int len)
 {
 	int ret = 0;
 	struct arena_info *arena = NULL;
@@ -1036,12 +1103,19 @@ static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
 		if (new_postmap >= arena->internal_nlba) {
 			ret = -EIO;
 			goto out_lane;
-		} else
-			ret = btt_data_write(arena, new_postmap, page,
-					off, cur_len);
+		}
+
+		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
 		if (ret)
 			goto out_lane;
 
+		if (bip) {
+			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
+					WRITE);
+			if (ret)
+				goto out_lane;
+		}
+
 		lock_map(arena, premap);
 		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
 		if (ret)
@@ -1081,18 +1155,18 @@ static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
 	return ret;
 }
 
-static int btt_do_bvec(struct btt *btt, struct page *page,
-			unsigned int len, unsigned int off, int rw,
-			sector_t sector)
+static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
+			struct page *page, unsigned int len, unsigned int off,
+			int rw, sector_t sector)
 {
 	int ret;
 
 	if (rw == READ) {
-		ret = btt_read_pg(btt, page, off, sector, len);
+		ret = btt_read_pg(btt, bip, page, off, sector, len);
 		flush_dcache_page(page);
 	} else {
 		flush_dcache_page(page);
-		ret = btt_write_pg(btt, sector, page, off, len);
+		ret = btt_write_pg(btt, bip, sector, page, off, len);
 	}
 
 	return ret;
@@ -1100,11 +1174,23 @@ static int btt_do_bvec(struct btt *btt, struct page *page,
 
 static void btt_make_request(struct request_queue *q, struct bio *bio)
 {
+	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct btt *btt = q->queuedata;
 	struct bvec_iter iter;
 	struct bio_vec bvec;
 	int err = 0, rw;
 
+	/*
+	 * bio_integrity_enabled also checks if the bio already has an
+	 * integrity payload attached. If it does, we *don't* do a
+	 * bio_integrity_prep here - the payload has been generated by
+	 * another kernel subsystem, and we just pass it through.
+	 */
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+		err = -EIO;
+		goto out;
+	}
+
 	rw = bio_data_dir(bio);
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
@@ -1115,7 +1201,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
 		BUG_ON(len < btt->sector_size);
 		BUG_ON(len % btt->sector_size);
 
-		err = btt_do_bvec(btt, bvec.bv_page, len, bvec.bv_offset,
+		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
 				rw, iter.bi_sector);
 		if (err) {
 			dev_info(&btt->nd_btt->dev,
@@ -1135,7 +1221,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, 0);
 	return 0;
 }
@@ -1188,15 +1274,26 @@ static int btt_blk_init(struct btt *btt)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 
-	set_capacity(btt->btt_disk,
-			btt->nlba * btt->sector_size >> SECTOR_SHIFT);
+	set_capacity(btt->btt_disk, 0);
 	add_disk(btt->btt_disk);
+	if (btt_meta_size(btt)) {
+		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
+
+		if (rc) {
+			del_gendisk(btt->btt_disk);
+			put_disk(btt->btt_disk);
+			blk_cleanup_queue(btt->btt_queue);
+			return rc;
+		}
+	}
+	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
 
 	return 0;
 }
 
 static void btt_blk_cleanup(struct btt *btt)
 {
+	blk_integrity_unregister(btt->btt_disk);
 	del_gendisk(btt->btt_disk);
 	put_disk(btt->btt_disk);
 	blk_cleanup_queue(btt->btt_queue);
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 8c95a7792c3e..2caa0ef7e67a 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -31,7 +31,7 @@
 #define ARENA_MAX_SIZE (1ULL << 39)	/* 512 GB */
 #define RTT_VALID (1UL << 31)
 #define RTT_INVALID 0
-#define INT_LBASIZE_ALIGNMENT 256
+#define INT_LBASIZE_ALIGNMENT 64
 #define BTT_PG_SIZE 4096
 #define BTT_DEFAULT_NFREE ND_MAX_LANES
 #define LOG_SEQ_INIT 1
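
Aside (not part of the patch): the alignment is relaxed from 256 to 64 because the new protection-information block sizes are not multiples of 256. Assuming the arena setup rounds the external lbasize up to INT_LBASIZE_ALIGNMENT to get the internal block size, the finer alignment wastes far less space per block; the sketch below just does that rounding for comparison.

#include <stdio.h>

static unsigned long roundup_to(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	unsigned long sizes[] = { 520, 528, 4104, 4160, 4224 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("lbasize %4lu -> internal %4lu (align 64) vs %4lu (align 256)\n",
				sizes[i], roundup_to(sizes[i], 64),
				roundup_to(sizes[i], 256));
	return 0;
}
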
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 470fbdccd0ac..661aacedc140 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -103,7 +103,8 @@ struct nd_btt *to_nd_btt(struct device *dev)
 }
 EXPORT_SYMBOL(to_nd_btt);
 
-static const unsigned long btt_lbasize_supported[] = { 512, 4096, 0 };
+static const unsigned long btt_lbasize_supported[] = { 512, 520, 528,
+	4096, 4104, 4160, 4224, 0 };
 
 static ssize_t sector_size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
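
Aside (not part of the patch): the new entries pair each sector size with common T10 protection-information metadata sizes. Assuming the driver exposes 512-byte sectors for the sub-4K lbasizes and 4096-byte sectors otherwise, btt_meta_size() works out to:

	lbasize   sector size   metadata per block
	512       512             0
	520       512             8
	528       512            16
	4096      4096            0
	4104      4096            8
	4160      4096           64
	4224      4096          128
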
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index dd824d7c2669..1d96b9a6e4cc 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -13,6 +13,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/export.h>
 #include <linux/module.h>
+#include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/ctype.h>
 #include <linux/ndctl.h>
@@ -361,6 +362,42 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 }
 EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nd_pi_nop_generate_verify(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
+{
+	struct blk_integrity integrity = {
+		.name = "ND-PI-NOP",
+		.generate_fn = nd_pi_nop_generate_verify,
+		.verify_fn = nd_pi_nop_generate_verify,
+		.tuple_size = meta_size,
+		.tag_size = meta_size,
+	};
+	int ret;
+
+	ret = blk_integrity_register(disk, &integrity);
+	if (ret)
+		return ret;
+
+	blk_queue_max_integrity_segments(disk->queue, 1);
+
+	return 0;
+}
+EXPORT_SYMBOL(nd_integrity_init);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
+{
+	return 0;
+}
+EXPORT_SYMBOL(nd_integrity_init);
+
+#endif
+
 static __init int libnvdimm_init(void)
 {
 	int rc;
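
Aside (not part of the patch): because generate_fn and verify_fn do nothing and tag_size equals tuple_size, the block layer allocates and transports meta_size bytes per sector but never interprets them; btt_rw_integrity() simply copies that buffer to and from the namespace. A rough sizing sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int sector_size = 4096;	/* BTT sector exposed to the block layer */
	unsigned int meta = 8;			/* per-sector metadata, e.g. lbasize 4104 */
	unsigned int io_bytes = 64 * 1024;	/* one 64 KiB bio */
	unsigned int sectors = io_bytes / sector_size;

	/* opaque integrity bytes btt_rw_integrity() moves for this bio */
	printf("%u sectors -> %u metadata bytes\n", sectors, sectors * meta);
	return 0;
}
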
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index f153f43ca3d6..f4459faa456c 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -136,6 +136,7 @@ enum nd_async_mode {
 	ND_ASYNC,
 };
 
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
 void wait_nvdimm_bus_probe_idle(struct device *dev);
 void nd_device_register(struct device *dev);
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);