Diffstat (limited to 'drivers/nvdimm/btt.c')
-rw-r--r--  drivers/nvdimm/btt.c  117
1 files changed, 106 insertions, 11 deletions
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9c96530ea6d5..dabb84f7ab8a 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -31,6 +31,11 @@ enum log_ent_request {
 	LOG_OLD_ENT
 };
 
+static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
+{
+	return offset + nd_btt->initial_offset;
+}
+
 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
 		void *buf, size_t n, unsigned long flags)
 {
@@ -38,7 +43,7 @@ static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* arena offsets may be shifted from the base of the device */
-	offset += arena->nd_btt->initial_offset;
+	offset = adjust_initial_offset(nd_btt, offset);
 	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
 }
 
@@ -49,7 +54,7 @@ static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* arena offsets may be shifted from the base of the device */
-	offset += arena->nd_btt->initial_offset;
+	offset = adjust_initial_offset(nd_btt, offset);
 	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
 }
 
@@ -381,7 +386,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
 	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
 	if (++(arena->freelist[lane].seq) == 4)
 		arena->freelist[lane].seq = 1;
-	arena->freelist[lane].block = le32_to_cpu(ent->old_map);
+	if (ent_e_flag(ent->old_map))
+		arena->freelist[lane].has_err = 1;
+	arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));
 
 	return ret;
 }
@@ -480,6 +487,40 @@ static int btt_log_init(struct arena_info *arena)
 	return ret;
 }
 
+static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
+{
+	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
+}
+
+static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
+{
+	int ret = 0;
+
+	if (arena->freelist[lane].has_err) {
+		void *zero_page = page_address(ZERO_PAGE(0));
+		u32 lba = arena->freelist[lane].block;
+		u64 nsoff = to_namespace_offset(arena, lba);
+		unsigned long len = arena->sector_size;
+
+		mutex_lock(&arena->err_lock);
+
+		while (len) {
+			unsigned long chunk = min(len, PAGE_SIZE);
+
+			ret = arena_write_bytes(arena, nsoff, zero_page,
+				chunk, 0);
+			if (ret)
+				break;
+			len -= chunk;
+			nsoff += chunk;
+			if (len == 0)
+				arena->freelist[lane].has_err = 0;
+		}
+		mutex_unlock(&arena->err_lock);
+	}
+	return ret;
+}
+
 static int btt_freelist_init(struct arena_info *arena)
 {
 	int old, new, ret;
@@ -505,6 +546,16 @@ static int btt_freelist_init(struct arena_info *arena)
 		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
 		arena->freelist[i].block = le32_to_cpu(log_new.old_map);
 
+		/*
+		 * FIXME: if error clearing fails during init, we want to make
+		 * the BTT read-only
+		 */
+		if (ent_e_flag(log_new.old_map)) {
+			ret = arena_clear_freelist_error(arena, i);
+			if (ret)
+				WARN_ONCE(1, "Unable to clear known errors\n");
+		}
+
 		/* This implies a newly created or untouched flog entry */
 		if (log_new.old_map == log_new.new_map)
 			continue;
@@ -525,7 +576,6 @@ static int btt_freelist_init(struct arena_info *arena)
 			if (ret)
 				return ret;
 		}
-
 	}
 
 	return 0;
@@ -695,6 +745,7 @@ static int discover_arenas(struct btt *btt)
 		arena->external_lba_start = cur_nlba;
 		parse_arena_meta(arena, super, cur_off);
 
+		mutex_init(&arena->err_lock);
 		ret = btt_freelist_init(arena);
 		if (ret)
 			goto out;
@@ -905,11 +956,6 @@ static void unlock_map(struct arena_info *arena, u32 premap)
 	spin_unlock(&arena->map_locks[idx].lock);
 }
 
-static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
-{
-	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
-}
-
 static int btt_data_read(struct arena_info *arena, struct page *page,
 			unsigned int off, u32 lba, u32 len)
 {
@@ -1067,8 +1113,14 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		}
 
 		ret = btt_data_read(arena, page, off, postmap, cur_len);
-		if (ret)
+		if (ret) {
+			int rc;
+
+			/* Media error - set the e_flag */
+			rc = btt_map_write(arena, premap, postmap, 0, 1,
+				NVDIMM_IO_ATOMIC);
 			goto out_rtt;
+		}
 
 		if (bip) {
 			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
@@ -1093,6 +1145,21 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
 	return ret;
 }
 
+/*
+ * Normally, arena_{read,write}_bytes will take care of the initial offset
+ * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
+ * we need the final, raw namespace offset here
+ */
+static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
+		u32 postmap)
+{
+	u64 nsoff = adjust_initial_offset(arena->nd_btt,
+			to_namespace_offset(arena, postmap));
+	sector_t phys_sector = nsoff >> 9;
+
+	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
+}
+
 static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 			sector_t sector, struct page *page, unsigned int off,
 			unsigned int len)
@@ -1105,7 +1172,9 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 	while (len) {
 		u32 cur_len;
+		int e_flag;
 
+ retry:
 		lane = nd_region_acquire_lane(btt->nd_region);
 
 		ret = lba_to_arena(btt, sector, &premap, &arena);
@@ -1118,6 +1187,21 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 			goto out_lane;
 		}
 
+		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
+			arena->freelist[lane].has_err = 1;
+
+		if (mutex_is_locked(&arena->err_lock)
+				|| arena->freelist[lane].has_err) {
+			nd_region_release_lane(btt->nd_region, lane);
+
+			ret = arena_clear_freelist_error(arena, lane);
+			if (ret)
+				return ret;
+
+			/* OK to acquire a different lane/free block */
+			goto retry;
+		}
+
 		new_postmap = arena->freelist[lane].block;
 
 		/* Wait if the new block is being read from */
@@ -1143,7 +1227,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		}
 
 		lock_map(arena, premap);
-		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL,
+		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
 				NVDIMM_IO_ATOMIC);
 		if (ret)
 			goto out_map;
@@ -1151,6 +1235,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 			ret = -EIO;
 			goto out_map;
 		}
+		if (e_flag)
+			set_e_flag(old_postmap);
 
 		log.lba = cpu_to_le32(premap);
 		log.old_map = cpu_to_le32(old_postmap);
@@ -1169,6 +1255,12 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		unlock_map(arena, premap);
 		nd_region_release_lane(btt->nd_region, lane);
 
+		if (e_flag) {
+			ret = arena_clear_freelist_error(arena, lane);
+			if (ret)
+				return ret;
+		}
+
 		len -= cur_len;
 		off += cur_len;
 		sector += btt->sector_size >> SECTOR_SHIFT;
@@ -1349,6 +1441,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
 {
 	int ret;
 	struct btt *btt;
+	struct nd_namespace_io *nsio;
 	struct device *dev = &nd_btt->dev;
 
 	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
@@ -1362,6 +1455,8 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
 	INIT_LIST_HEAD(&btt->arena_list);
 	mutex_init(&btt->init_lock);
 	btt->nd_region = nd_region;
+	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
+	btt->phys_bb = &nsio->bb;
 
 	ret = discover_arenas(btt);
 	if (ret) {