author     Vishal Verma <vishal.l.verma@intel.com>    2017-05-10 17:01:30 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2017-05-11 00:46:22 -0400
commit     3ae3d67ba705c754a3c91ac009f9ce73a0e7286a
tree       0163a4ca204ab5f9601f12ee3c6fcfdb45a16459
parent     8376efd31d3d7c44bd05be337adde023cc531fa1
libnvdimm: add an atomic vs process context flag to rw_bytes
nsio_rw_bytes can clear media errors, but this cannot be done while we
are in an atomic context due to locking within ACPI. From the BTT,
->rw_bytes may be called either from atomic or process context depending
on whether the calls happen during initialization or during IO.

During init, we want to ensure error clearing happens, and the flag
marking process context allows nsio_rw_bytes to do that. When called
during IO, we're in atomic context, and error clearing can be skipped.

Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  drivers/nvdimm/blk.c        3
-rw-r--r--  drivers/nvdimm/btt.c       67
-rw-r--r--  drivers/nvdimm/btt_devs.c   2
-rw-r--r--  drivers/nvdimm/claim.c      6
-rw-r--r--  drivers/nvdimm/nd.h         1
-rw-r--r--  drivers/nvdimm/pfn_devs.c   4
-rw-r--r--  include/linux/nd.h         12
7 files changed, 53 insertions(+), 42 deletions(-)
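
The new last parameter to ->rw_bytes and its wrappers is a flag word;
NVDIMM_IO_ATOMIC is its only defined bit. As a rough illustration (not
part of the patch; the helper name and its in_io_path argument are
hypothetical), a caller would pick the flag like this:

	/*
	 * Sketch of the calling convention this patch establishes:
	 * IO-path callers may hold a lane spinlock, so they pass
	 * NVDIMM_IO_ATOMIC and nsio_rw_bytes skips error clearing;
	 * init-time callers run in process context, pass 0, and
	 * allow poison to be cleared.
	 */
	static int example_read(struct nd_namespace_common *ndns,
			resource_size_t offset, void *buf, size_t n,
			bool in_io_path)
	{
		unsigned long flags = in_io_path ? NVDIMM_IO_ATOMIC : 0;

		return nvdimm_read_bytes(ndns, offset, buf, n, flags);
	}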
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 9faaa9694d87..822198a75e96 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -218,7 +218,8 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *iobuf, size_t n, int rw)
+		resource_size_t offset, void *iobuf, size_t n, int rw,
+		unsigned long flags)
 {
 	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
 	struct nd_blk_region *ndbr = to_ndbr(nsblk);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 368795aad5c9..aa977cd4869d 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -32,25 +32,25 @@ enum log_ent_request {
 };
 
 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
-		void *buf, size_t n)
+		void *buf, size_t n, unsigned long flags)
 {
 	struct nd_btt *nd_btt = arena->nd_btt;
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* arena offsets are 4K from the base of the device */
 	offset += SZ_4K;
-	return nvdimm_read_bytes(ndns, offset, buf, n);
+	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
 }
 
 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
-		void *buf, size_t n)
+		void *buf, size_t n, unsigned long flags)
 {
 	struct nd_btt *nd_btt = arena->nd_btt;
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* arena offsets are 4K from the base of the device */
 	offset += SZ_4K;
-	return nvdimm_write_bytes(ndns, offset, buf, n);
+	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
 }
 
 static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
@@ -58,19 +58,19 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
 	int ret;
 
 	ret = arena_write_bytes(arena, arena->info2off, super,
-			sizeof(struct btt_sb));
+			sizeof(struct btt_sb), 0);
 	if (ret)
 		return ret;
 
 	return arena_write_bytes(arena, arena->infooff, super,
-			sizeof(struct btt_sb));
+			sizeof(struct btt_sb), 0);
 }
 
 static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
 {
 	WARN_ON(!super);
 	return arena_read_bytes(arena, arena->infooff, super,
-			sizeof(struct btt_sb));
+			sizeof(struct btt_sb), 0);
 }
 
 /*
@@ -79,16 +79,17 @@ static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
  * mapping is in little-endian
  * mapping contains 'E' and 'Z' flags as desired
  */
-static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
+static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
+		unsigned long flags)
 {
 	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
 
 	WARN_ON(lba >= arena->external_nlba);
-	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
+	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
 }
 
 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
-		u32 z_flag, u32 e_flag)
+		u32 z_flag, u32 e_flag, unsigned long rwb_flags)
 {
 	u32 ze;
 	__le32 mapping_le;
@@ -127,11 +128,11 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
 	}
 
 	mapping_le = cpu_to_le32(mapping);
-	return __btt_map_write(arena, lba, mapping_le);
+	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
 }
 
 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
-		int *trim, int *error)
+		int *trim, int *error, unsigned long rwb_flags)
 {
 	int ret;
 	__le32 in;
@@ -140,7 +141,7 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
 
 	WARN_ON(lba >= arena->external_nlba);
 
-	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
+	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
 	if (ret)
 		return ret;
 
@@ -189,7 +190,7 @@ static int btt_log_read_pair(struct arena_info *arena, u32 lane,
 	WARN_ON(!ent);
 	return arena_read_bytes(arena,
 			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
-			2 * LOG_ENT_SIZE);
+			2 * LOG_ENT_SIZE, 0);
 }
 
 static struct dentry *debugfs_root;
@@ -335,7 +336,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
  * btt_flog_write is the wrapper for updating the freelist elements
  */
 static int __btt_log_write(struct arena_info *arena, u32 lane,
-			u32 sub, struct log_entry *ent)
+			u32 sub, struct log_entry *ent, unsigned long flags)
 {
 	int ret;
 	/*
@@ -350,13 +351,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
 	void *src = ent;
 
 	/* split the 16B write into atomic, durable halves */
-	ret = arena_write_bytes(arena, ns_off, src, log_half);
+	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
 	if (ret)
 		return ret;
 
 	ns_off += log_half;
 	src += log_half;
-	return arena_write_bytes(arena, ns_off, src, log_half);
+	return arena_write_bytes(arena, ns_off, src, log_half, flags);
 }
 
 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
@@ -364,7 +365,7 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
 {
 	int ret;
 
-	ret = __btt_log_write(arena, lane, sub, ent);
+	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
 	if (ret)
 		return ret;
 
@@ -397,7 +398,7 @@ static int btt_map_init(struct arena_info *arena)
 		size_t size = min(mapsize, chunk_size);
 
 		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
-				size);
+				size, 0);
 		if (ret)
 			goto free;
 
@@ -428,10 +429,10 @@ static int btt_log_init(struct arena_info *arena)
 		log.old_map = cpu_to_le32(arena->external_nlba + i);
 		log.new_map = cpu_to_le32(arena->external_nlba + i);
 		log.seq = cpu_to_le32(LOG_SEQ_INIT);
-		ret = __btt_log_write(arena, i, 0, &log);
+		ret = __btt_log_write(arena, i, 0, &log, 0);
 		if (ret)
 			return ret;
-		ret = __btt_log_write(arena, i, 1, &zerolog);
+		ret = __btt_log_write(arena, i, 1, &zerolog, 0);
 		if (ret)
 			return ret;
 	}
@@ -470,7 +471,7 @@ static int btt_freelist_init(struct arena_info *arena)
 
 	/* Check if map recovery is needed */
 	ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
-			NULL, NULL);
+			NULL, NULL, 0);
 	if (ret)
 		return ret;
 	if ((le32_to_cpu(log_new.new_map) != map_entry) &&
@@ -480,7 +481,7 @@ static int btt_freelist_init(struct arena_info *arena)
 		 * to complete the map write. So fix up the map.
 		 */
 		ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
-				le32_to_cpu(log_new.new_map), 0, 0);
+				le32_to_cpu(log_new.new_map), 0, 0, 0);
 		if (ret)
 			return ret;
 	}
@@ -875,7 +876,7 @@ static int btt_data_read(struct arena_info *arena, struct page *page,
 	u64 nsoff = to_namespace_offset(arena, lba);
 	void *mem = kmap_atomic(page);
 
-	ret = arena_read_bytes(arena, nsoff, mem + off, len);
+	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
 	kunmap_atomic(mem);
 
 	return ret;
@@ -888,7 +889,7 @@ static int btt_data_write(struct arena_info *arena, u32 lba,
 	u64 nsoff = to_namespace_offset(arena, lba);
 	void *mem = kmap_atomic(page);
 
-	ret = arena_write_bytes(arena, nsoff, mem + off, len);
+	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
 	kunmap_atomic(mem);
 
 	return ret;
@@ -931,10 +932,12 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
 		mem = kmap_atomic(bv.bv_page);
 		if (rw)
 			ret = arena_write_bytes(arena, meta_nsoff,
-					mem + bv.bv_offset, cur_len);
+					mem + bv.bv_offset, cur_len,
+					NVDIMM_IO_ATOMIC);
 		else
 			ret = arena_read_bytes(arena, meta_nsoff,
-					mem + bv.bv_offset, cur_len);
+					mem + bv.bv_offset, cur_len,
+					NVDIMM_IO_ATOMIC);
 
 		kunmap_atomic(mem);
 		if (ret)
@@ -976,7 +979,8 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 		cur_len = min(btt->sector_size, len);
 
-		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
+		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
+				NVDIMM_IO_ATOMIC);
 		if (ret)
 			goto out_lane;
 
@@ -1006,7 +1010,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
 			barrier();
 
 			ret = btt_map_read(arena, premap, &new_map, &t_flag,
-					&e_flag);
+					&e_flag, NVDIMM_IO_ATOMIC);
 			if (ret)
 				goto out_rtt;
 
@@ -1093,7 +1097,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		}
 
 		lock_map(arena, premap);
-		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
+		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL,
+				NVDIMM_IO_ATOMIC);
 		if (ret)
 			goto out_map;
 		if (old_postmap >= arena->internal_nlba) {
@@ -1110,7 +1115,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		if (ret)
 			goto out_map;
 
-		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
+		ret = btt_map_write(arena, premap, new_postmap, 0, 0, 0);
 		if (ret)
 			goto out_map;
 
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 4b76af2b8715..ae00dc0d9791 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -273,7 +273,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
 	if (!btt_sb || !ndns || !nd_btt)
 		return -ENODEV;
 
-	if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb)))
+	if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
 		return -ENXIO;
 
 	if (nvdimm_namespace_capacity(ndns) < SZ_16M)
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 93d128da1c92..7ceb5fa4f2a1 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -228,7 +228,8 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
 EXPORT_SYMBOL(nd_sb_checksum);
 
 static int nsio_rw_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size, int rw)
+		resource_size_t offset, void *buf, size_t size, int rw,
+		unsigned long flags)
 {
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
@@ -259,7 +260,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	 * work around this collision.
 	 */
 	if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
-			&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
+			&& !(flags & NVDIMM_IO_ATOMIC)
+			&& !ndns->claim) {
 		long cleared;
 
 		cleared = nvdimm_clear_poison(&ndns->dev,
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 77d032192bf7..03852d738eec 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -31,6 +31,7 @@ enum {
 	ND_MAX_LANES = 256,
 	SECTOR_SHIFT = 9,
 	INT_LBASIZE_ALIGNMENT = 64,
+	NVDIMM_IO_ATOMIC = 1,
 };
 
 struct nd_poison {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 335c8175410b..a6c403600d19 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -357,7 +357,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 	if (!is_nd_pmem(nd_pfn->dev.parent))
 		return -ENODEV;
 
-	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
+	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
 		return -ENXIO;
 
 	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
@@ -662,7 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
 
-	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
 }
 
 /*
diff --git a/include/linux/nd.h b/include/linux/nd.h
index fa66aeed441a..194b8e002ea7 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -48,7 +48,7 @@ struct nd_namespace_common {
 	struct device dev;
 	struct device *claim;
 	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
-			void *buf, size_t size, int rw);
+			void *buf, size_t size, int rw, unsigned long flags);
 };
 
 static inline struct nd_namespace_common *to_ndns(struct device *dev)
@@ -134,9 +134,10 @@ static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *
  * @buf is up-to-date upon return from this routine.
  */
 static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size)
+		resource_size_t offset, void *buf, size_t size,
+		unsigned long flags)
 {
-	return ndns->rw_bytes(ndns, offset, buf, size, READ);
+	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
 }
 
 /**
@@ -152,9 +153,10 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
  * to media is handled internal to the @ndns driver, if at all.
  */
 static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size)
+		resource_size_t offset, void *buf, size_t size,
+		unsigned long flags)
 {
-	return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
+	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
 }
 
 #define MODULE_ALIAS_ND_DEVICE(type) \