author		Dan Williams <dan.j.williams@intel.com>	2016-03-22 03:22:16 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2016-04-22 15:26:23 -0400
commit		200c79da824c978fcf6eec1dc9c0a1e521133267 (patch)
tree		bbc2182ead10eaa034278bb42c01a924db9a7d7d
parent		947df02d255a6a81a3832e831c5ca02078cfd529 (diff)
libnvdimm, pmem, pfn: make pmem_rw_bytes generic and refactor pfn setup
In preparation for providing an alternative (to block device) access
mechanism to persistent memory, convert pmem_rw_bytes() to
nsio_rw_bytes(). This allows ->rw_bytes() functionality without requiring
a 'struct pmem_device' to be instantiated.

In other words, when ->rw_bytes() is in use i/o is driven through
'struct nd_namespace_io', otherwise it is driven through
'struct pmem_device' and the block layer. This consolidates the disjoint
calls to devm_exit_badblocks() and devm_memunmap() into a common
devm_nsio_disable() and cleans up the init path to use a unified
pmem_attach_disk() implementation.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
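For orientation, a minimal consumer sketch (not part of the patch): after this
change a stacked personality reads its info block through the namespace-generic
->rw_bytes() entry point, which lands in nsio_rw_bytes() for pmem-backed
namespaces without a 'struct pmem_device' ever being instantiated. The helper
name example_read_info_block() is hypothetical.

/*
 * Hypothetical sketch, not from the patch: drive the generic
 * ->rw_bytes() op populated by devm_nsio_enable(). The READ path
 * consults the namespace badblocks list and returns -EIO on poison.
 */
static int example_read_info_block(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ);
}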
-rw-r--r--	drivers/nvdimm/blk.c			|   2
-rw-r--r--	drivers/nvdimm/btt_devs.c		|   4
-rw-r--r--	drivers/nvdimm/claim.c			|  61
-rw-r--r--	drivers/nvdimm/nd.h			|  40
-rw-r--r--	drivers/nvdimm/pfn_devs.c		|   4
-rw-r--r--	drivers/nvdimm/pmem.c			| 236
-rw-r--r--	include/linux/nd.h			|   9
-rw-r--r--	tools/testing/nvdimm/Kbuild		|   1
-rw-r--r--	tools/testing/nvdimm/test/iomap.c	|  27
9 files changed, 211 insertions(+), 173 deletions(-)
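Before the per-file diffs, a hedged sketch of the probe flow the patch
converges on (names as introduced below; driver glue elided):
devm_nsio_enable() publishes nsio_rw_bytes() for raw namespace access, the
info-block probes run against it, and pmem_attach_disk() disables raw access
once a block device takes over.

/*
 * Condensed from the new nd_pmem_probe()/pmem_attach_disk() below;
 * an orientation sketch, not a verbatim copy of the patch.
 */
static int example_probe_flow(struct device *dev,
		struct nd_namespace_common *ndns)
{
	/* raw access first: sets ndns->rw_bytes = nsio_rw_bytes */
	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	/* a valid info-block means we come back as btt-pmem or pfn-pmem */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0)
		return -ENXIO;

	/*
	 * ...otherwise attach as a raw pmem block device; this path calls
	 * devm_nsio_disable() before taking over the namespace.
	 */
	return pmem_attach_disk(dev, ndns);
}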
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 4c14ecdc792b..495e06d9f7e7 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -324,7 +324,7 @@ static int nd_blk_probe(struct device *dev)
 	ndns->rw_bytes = nsblk_rw_bytes;
 	if (is_nd_btt(dev))
 		return nvdimm_namespace_attach_btt(ndns);
-	else if (nd_btt_probe(dev, ndns, nsblk) == 0) {
+	else if (nd_btt_probe(dev, ndns) == 0) {
 		/* we'll come back as btt-blk */
 		return -ENXIO;
 	} else
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 1886171af80e..816d0dae6398 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -273,8 +273,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
 	return 0;
 }
 
-int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns,
-		void *drvdata)
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
 	int rc;
 	struct device *btt_dev;
@@ -289,7 +288,6 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns,
 	nvdimm_bus_unlock(&ndns->dev);
 	if (!btt_dev)
 		return -ENOMEM;
-	dev_set_drvdata(btt_dev, drvdata);
 	btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
 	rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
 	dev_dbg(dev, "%s: btt: %s\n", __func__,
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index e8f03b0e95e4..6bbd0a36994a 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,6 +12,7 @@
  */
 #include <linux/device.h>
 #include <linux/sizes.h>
+#include <linux/pmem.h>
 #include "nd-core.h"
 #include "pfn.h"
 #include "btt.h"
@@ -199,3 +200,63 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
 	return sum;
 }
 EXPORT_SYMBOL(nd_sb_checksum);
+
+static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+		resource_size_t offset, void *buf, size_t size, int rw)
+{
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+	if (unlikely(offset + size > nsio->size)) {
+		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
+		return -EFAULT;
+	}
+
+	if (rw == READ) {
+		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+		if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+			return -EIO;
+		return memcpy_from_pmem(buf, nsio->addr + offset, size);
+	} else {
+		memcpy_to_pmem(nsio->addr + offset, buf, size);
+		wmb_pmem();
+	}
+
+	return 0;
+}
+
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+{
+	struct resource *res = &nsio->res;
+	struct nd_namespace_common *ndns = &nsio->common;
+
+	nsio->size = resource_size(res);
+	if (!devm_request_mem_region(dev, res->start, resource_size(res),
+				dev_name(dev))) {
+		dev_warn(dev, "could not reserve region %pR\n", res);
+		return -EBUSY;
+	}
+
+	ndns->rw_bytes = nsio_rw_bytes;
+	if (devm_init_badblocks(dev, &nsio->bb))
+		return -ENOMEM;
+	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
+			&nsio->res);
+
+	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
+			ARCH_MEMREMAP_PMEM);
+	if (IS_ERR(nsio->addr))
+		return PTR_ERR(nsio->addr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_nsio_enable);
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+	struct resource *res = &nsio->res;
+
+	devm_memunmap(dev, nsio->addr);
+	devm_exit_badblocks(dev, &nsio->bb);
+	devm_release_mem_region(dev, res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(devm_nsio_disable);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 0fb14890ba26..10e23fe49012 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -13,6 +13,7 @@
 #ifndef __ND_H__
 #define __ND_H__
 #include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
@@ -197,13 +198,12 @@ struct nd_gen_sb {
 
 u64 nd_sb_checksum(struct nd_gen_sb *sb);
 #if IS_ENABLED(CONFIG_BTT)
-int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns,
-		void *drvdata);
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_btt(struct device *dev);
 struct device *nd_btt_create(struct nd_region *nd_region);
 #else
 static inline int nd_btt_probe(struct device *dev,
-		struct nd_namespace_common *ndns, void *drvdata)
+		struct nd_namespace_common *ndns)
 {
 	return -ENODEV;
 }
@@ -221,14 +221,13 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
 
 struct nd_pfn *to_nd_pfn(struct device *dev);
 #if IS_ENABLED(CONFIG_NVDIMM_PFN)
-int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns,
-		void *drvdata);
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_pfn(struct device *dev);
 struct device *nd_pfn_create(struct nd_region *nd_region);
 int nd_pfn_validate(struct nd_pfn *nd_pfn);
 #else
-static inline int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns,
-		void *drvdata)
+static inline int nd_pfn_probe(struct device *dev,
+		struct nd_namespace_common *ndns)
 {
 	return -ENODEV;
 }
@@ -272,6 +271,20 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 		char *name);
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
 		struct badblocks *bb, const struct resource *res);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline int devm_nsio_enable(struct device *dev,
+		struct nd_namespace_io *nsio)
+{
+	return -ENXIO;
+}
+static inline void devm_nsio_disable(struct device *dev,
+		struct nd_namespace_io *nsio)
+{
+}
+#endif
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
@@ -285,6 +298,19 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
 	return true;
 }
 void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
+		unsigned int len)
+{
+	if (bb->count) {
+		sector_t first_bad;
+		int num_bad;
+
+		return !!badblocks_check(bb, sector, len / 512, &first_bad,
+				&num_bad);
+	}
+
+	return false;
+}
 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
 const u8 *nd_dev_to_uuid(struct device *dev);
 bool pmem_should_map_pages(struct device *dev);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 96aa5490c279..9df081ae96e3 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -410,8 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 }
 EXPORT_SYMBOL(nd_pfn_validate);
 
-int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns,
-		void *drvdata)
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
 	int rc;
 	struct nd_pfn *nd_pfn;
@@ -427,7 +426,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns,
 	nvdimm_bus_unlock(&ndns->dev);
 	if (!pfn_dev)
 		return -ENOMEM;
-	dev_set_drvdata(pfn_dev, drvdata);
 	pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
 	nd_pfn = to_nd_pfn(pfn_dev);
 	nd_pfn->pfn_sb = pfn_sb;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 67d48e2e8ca2..b5f81b02205c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -49,19 +49,6 @@ struct pmem_device {
 	struct badblocks bb;
 };
 
-static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
-{
-	if (bb->count) {
-		sector_t first_bad;
-		int num_bad;
-
-		return !!badblocks_check(bb, sector, len / 512, &first_bad,
-				&num_bad);
-	}
-
-	return false;
-}
-
 static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 		unsigned int len)
 {
@@ -209,16 +196,40 @@ void pmem_release_disk(void *disk)
 	put_disk(disk);
 }
 
-static struct pmem_device *pmem_alloc(struct device *dev,
-		struct resource *res, int id)
+static struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap);
+
+static int pmem_attach_disk(struct device *dev,
+		struct nd_namespace_common *ndns)
 {
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct vmem_altmap __altmap, *altmap = NULL;
+	struct resource *res = &nsio->res;
+	struct nd_pfn *nd_pfn = NULL;
+	int nid = dev_to_node(dev);
+	struct nd_pfn_sb *pfn_sb;
 	struct pmem_device *pmem;
+	struct resource pfn_res;
 	struct request_queue *q;
+	struct gendisk *disk;
+	void *addr;
+
+	/* while nsio_rw_bytes is active, parse a pfn info block if present */
+	if (is_nd_pfn(dev)) {
+		nd_pfn = to_nd_pfn(dev);
+		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
+		if (IS_ERR(altmap))
+			return PTR_ERR(altmap);
+	}
+
+	/* we're attaching a block device, disable raw namespace access */
+	devm_nsio_disable(dev, nsio);
 
 	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 	if (!pmem)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
+	dev_set_drvdata(dev, pmem);
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
 	if (!arch_has_wmb_pmem())
@@ -227,22 +238,31 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 	if (!devm_request_mem_region(dev, res->start, resource_size(res),
 				dev_name(dev))) {
 		dev_warn(dev, "could not reserve region %pR\n", res);
-		return ERR_PTR(-EBUSY);
+		return -EBUSY;
 	}
 
 	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
 	if (!q)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
+	pmem->pmem_queue = q;
 
 	pmem->pfn_flags = PFN_DEV;
-	if (pmem_should_map_pages(dev)) {
-		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
+	if (is_nd_pfn(dev)) {
+		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
+				altmap);
+		pfn_sb = nd_pfn->pfn_sb;
+		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+		pmem->pfn_flags |= PFN_MAP;
+		res = &pfn_res; /* for badblocks populate */
+		res->start += pmem->data_offset;
+	} else if (pmem_should_map_pages(dev)) {
+		addr = devm_memremap_pages(dev, &nsio->res,
 				&q->q_usage_counter, NULL);
 		pmem->pfn_flags |= PFN_MAP;
 	} else
-		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
-				pmem->phys_addr, pmem->size,
-				ARCH_MEMREMAP_PMEM);
+		addr = devm_memremap(dev, pmem->phys_addr,
+				pmem->size, ARCH_MEMREMAP_PMEM);
 
 	/*
 	 * At release time the queue must be dead before
@@ -250,23 +270,12 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 	 */
 	if (devm_add_action(dev, pmem_release_queue, q)) {
 		blk_cleanup_queue(q);
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
 
-	if (IS_ERR(pmem->virt_addr))
-		return (void __force *) pmem->virt_addr;
-
-	pmem->pmem_queue = q;
-	return pmem;
-}
-
-static int pmem_attach_disk(struct device *dev,
-		struct nd_namespace_common *ndns, struct pmem_device *pmem)
-{
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	int nid = dev_to_node(dev);
-	struct resource bb_res;
-	struct gendisk *disk;
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+	pmem->virt_addr = (void __pmem *) addr;
 
 	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
 	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
@@ -291,20 +300,9 @@ static int pmem_attach_disk(struct device *dev,
 	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
 			/ 512);
 	pmem->pmem_disk = disk;
-	devm_exit_badblocks(dev, &pmem->bb);
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	bb_res.start = nsio->res.start + pmem->data_offset;
-	bb_res.end = nsio->res.end;
-	if (is_nd_pfn(dev)) {
-		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-
-		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
-		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
-	}
-	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
-			&bb_res);
+	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
 	disk->bb = &pmem->bb;
 	add_disk(disk);
 	revalidate_disk(disk);
@@ -312,33 +310,8 @@ static int pmem_attach_disk(struct device *dev,
 	return 0;
 }
 
-static int pmem_rw_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size, int rw)
-{
-	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);
-
-	if (unlikely(offset + size > pmem->size)) {
-		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
-		return -EFAULT;
-	}
-
-	if (rw == READ) {
-		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
-
-		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
-			return -EIO;
-		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
-	} else {
-		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
-		wmb_pmem();
-	}
-
-	return 0;
-}
-
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
-	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	u32 start_pad = 0, end_trunc = 0;
 	resource_size_t start, size;
@@ -404,7 +377,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 * ->direct_access() to those that are included in the memmap.
 	 */
 	start += start_pad;
-	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+	size = resource_size(&nsio->res);
+	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
 	if (nd_pfn->mode == PFN_MODE_PMEM)
 		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
 			- start;
@@ -413,13 +387,13 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	else
 		return -ENXIO;
 
-	if (offset + start_pad + end_trunc >= pmem->size) {
+	if (offset + start_pad + end_trunc >= size) {
 		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
 				dev_name(&ndns->dev));
 		return -ENXIO;
 	}
 
-	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
+	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
 	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
 	pfn_sb->dataoff = cpu_to_le64(offset);
 	pfn_sb->npfns = cpu_to_le64(npfns);
@@ -456,17 +430,14 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 	return reserve;
 }
 
-static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
 {
-	struct resource res;
-	struct request_queue *q;
-	struct pmem_device *pmem;
-	struct vmem_altmap *altmap;
-	struct device *dev = &nd_pfn->dev;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
 	struct vmem_altmap __altmap = {
@@ -474,112 +445,75 @@ static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 		.reserve = init_altmap_reserve(base),
 	};
 
-	pmem = dev_get_drvdata(dev);
-	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
-	pmem->pfn_pad = start_pad + end_trunc;
+	memcpy(res, &nsio->res, sizeof(*res));
+	res->start += start_pad;
+	res->end -= end_trunc;
+
 	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (pmem->data_offset < SZ_8K)
-			return -EINVAL;
+		if (offset < SZ_8K)
+			return ERR_PTR(-EINVAL);
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
-			/ PAGE_SIZE;
+		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
 		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
 			dev_info(&nd_pfn->dev,
 					"number of pfns truncated from %lld to %ld\n",
 					le64_to_cpu(nd_pfn->pfn_sb->npfns),
 					nd_pfn->npfns);
-		altmap = & __altmap;
-		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
+		memcpy(altmap, &__altmap, sizeof(*altmap));
+		altmap->free = PHYS_PFN(offset - SZ_8K);
 		altmap->alloc = 0;
 	} else
-		return -ENXIO;
+		return ERR_PTR(-ENXIO);
 
-	/* establish pfn range for lookup, and switch to direct map */
-	q = pmem->pmem_queue;
-	memcpy(&res, &nsio->res, sizeof(res));
-	res.start += start_pad;
-	res.end -= end_trunc;
-	devm_remove_action(dev, pmem_release_queue, q);
-	devm_memunmap(dev, (void __force *) pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
-			&q->q_usage_counter, altmap);
-	pmem->pfn_flags |= PFN_MAP;
-
-	/*
-	 * At release time the queue must be dead before
-	 * devm_memremap_pages is unwound
-	 */
-	if (devm_add_action(dev, pmem_release_queue, q)) {
-		blk_cleanup_queue(q);
-		return -ENOMEM;
-	}
-	if (IS_ERR(pmem->virt_addr))
-		return PTR_ERR(pmem->virt_addr);
-
-	/* attach pmem disk in "pfn-mode" */
-	return pmem_attach_disk(dev, ndns, pmem);
+	return altmap;
 }
 
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+static struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
 	int rc;
 
 	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 
 	rc = nd_pfn_init(nd_pfn);
 	if (rc)
-		return rc;
+		return ERR_PTR(rc);
+
 	/* we need a valid pfn_sb before we can init a vmem_altmap */
-	return __nvdimm_namespace_attach_pfn(nd_pfn);
+	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
 }
 
 static int nd_pmem_probe(struct device *dev)
 {
-	struct nd_region *nd_region = to_nd_region(dev->parent);
 	struct nd_namespace_common *ndns;
-	struct nd_namespace_io *nsio;
-	struct pmem_device *pmem;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 
-	nsio = to_nd_namespace_io(&ndns->dev);
-	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
-	if (IS_ERR(pmem))
-		return PTR_ERR(pmem);
-
-	dev_set_drvdata(dev, pmem);
-	ndns->rw_bytes = pmem_rw_bytes;
-	if (devm_init_badblocks(dev, &pmem->bb))
-		return -ENOMEM;
-	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
+	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
+		return -ENXIO;
 
-	if (is_nd_btt(dev)) {
-		/* btt allocates its own request_queue */
-		devm_remove_action(dev, pmem_release_queue, pmem->pmem_queue);
-		blk_cleanup_queue(pmem->pmem_queue);
+	if (is_nd_btt(dev))
 		return nvdimm_namespace_attach_btt(ndns);
-	}
 
 	if (is_nd_pfn(dev))
-		return nvdimm_namespace_attach_pfn(ndns);
+		return pmem_attach_disk(dev, ndns);
 
-	if (nd_btt_probe(dev, ndns, pmem) == 0
-			|| nd_pfn_probe(dev, ndns, pmem) == 0) {
-		/*
-		 * We'll come back as either btt-pmem, or pfn-pmem, so
-		 * drop the queue allocation for now.
-		 */
+	/* if we find a valid info-block we'll come back as that personality */
+	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0)
 		return -ENXIO;
-	}
 
-	return pmem_attach_disk(dev, ndns, pmem);
+	/* ...otherwise we're just a raw pmem device */
+	return pmem_attach_disk(dev, ndns);
 }
 
 static int nd_pmem_remove(struct device *dev)
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5ea4aec7fd63..aee2761d294c 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/ndctl.h>
 #include <linux/device.h>
+#include <linux/badblocks.h>
 
 enum nvdimm_event {
 	NVDIMM_REVALIDATE_POISON,
@@ -55,13 +56,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev)
 }
 
 /**
- * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * struct nd_namespace_io - device representation of a persistent memory range
  * @dev: namespace device created by the nd region driver
  * @res: struct resource conversion of a NFIT SPA table
+ * @size: cached resource_size(@res) for fast path size checks
+ * @addr: virtual address to access the namespace range
+ * @bb: badblocks list for the namespace range
  */
 struct nd_namespace_io {
 	struct nd_namespace_common common;
 	struct resource res;
+	resource_size_t size;
+	void __pmem *addr;
+	struct badblocks bb;
 };
 
 /**
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index a34bfd0c8928..d5bc8c080b44 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -7,6 +7,7 @@ ldflags-y += --wrap=ioremap_nocache
 ldflags-y += --wrap=iounmap
 ldflags-y += --wrap=memunmap
 ldflags-y += --wrap=__devm_request_region
+ldflags-y += --wrap=__devm_release_region
 ldflags-y += --wrap=__request_region
 ldflags-y += --wrap=__release_region
 ldflags-y += --wrap=devm_memremap_pages
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 0c1a7e65bb81..c842095f2801 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -239,13 +239,11 @@ struct resource *__wrap___devm_request_region(struct device *dev,
 }
 EXPORT_SYMBOL(__wrap___devm_request_region);
 
-void __wrap___release_region(struct resource *parent, resource_size_t start,
-		resource_size_t n)
+static bool nfit_test_release_region(struct resource *parent,
+		resource_size_t start, resource_size_t n)
 {
-	struct nfit_test_resource *nfit_res;
-
 	if (parent == &iomem_resource) {
-		nfit_res = get_nfit_res(start);
+		struct nfit_test_resource *nfit_res = get_nfit_res(start);
 		if (nfit_res) {
 			struct resource *res = nfit_res->res + 1;
 
@@ -254,11 +252,26 @@ void __wrap___release_region(struct resource *parent, resource_size_t start,
 					__func__, start, n, res);
 			else
 				memset(res, 0, sizeof(*res));
-			return;
+			return true;
 		}
 	}
-	__release_region(parent, start, n);
+	return false;
+}
+
+void __wrap___release_region(struct resource *parent, resource_size_t start,
+		resource_size_t n)
+{
+	if (!nfit_test_release_region(parent, start, n))
+		__release_region(parent, start, n);
 }
 EXPORT_SYMBOL(__wrap___release_region);
 
+void __wrap___devm_release_region(struct device *dev, struct resource *parent,
+		resource_size_t start, resource_size_t n)
+{
+	if (!nfit_test_release_region(parent, start, n))
+		__devm_release_region(dev, parent, start, n);
+}
+EXPORT_SYMBOL(__wrap___devm_release_region);
+
 MODULE_LICENSE("GPL v2");