author    Dan Williams <dan.j.williams@intel.com>    2016-03-22 03:29:43 -0400
committer Dan Williams <dan.j.williams@intel.com>    2016-04-22 15:26:23 -0400
commit    ac515c084be9b3995f7aef0ae87797e75e0260f0 (patch)
tree      fd4803d8c5b720b5647686cc70b048ee69223172
parent    200c79da824c978fcf6eec1dc9c0a1e521133267 (diff)
libnvdimm, pmem, pfn: move pfn setup to the core
Now that pmem internals have been disentangled from pfn setup, that
code can move to the core. This is in preparation for adding another
user of the pfn-device capabilities.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  drivers/nvdimm/nd.h       |   7
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 181
-rw-r--r--  drivers/nvdimm/pmem.c     | 184
3 files changed, 188 insertions(+), 184 deletions(-)
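For orientation, here is a minimal sketch of how a consumer of the now-shared helper is expected to use it, assembled from the signatures this patch adds. The surrounding pmem_attach_disk()-style context (dev, q, nd_pfn) and the 4.6-era devm_memremap_pages() call are assumptions for illustration, not part of the patch:

	/* hypothetical caller sketch; nvdimm_setup_pfn() is declared in nd.h below */
	struct vmem_altmap __altmap, *altmap;
	struct resource pfn_res;
	void *addr;

	altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap); /* ERR_PTR(-ENXIO) when CONFIG_ND_CLAIM=n */

	/*
	 * Map the trimmed range; in PFN_MODE_PMEM the returned altmap
	 * steers struct page allocation into the pmem reserve (assumed
	 * 4.6-era devm_memremap_pages() signature):
	 */
	addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter, altmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);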
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 10e23fe49012..6c36509662e4 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -272,9 +272,16 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
 		struct badblocks *bb, const struct resource *res);
 #if IS_ENABLED(CONFIG_ND_CLAIM)
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap);
 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
 #else
+static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	return ERR_PTR(-ENXIO);
+}
 static inline int devm_nsio_enable(struct device *dev,
 		struct nd_namespace_io *nsio)
 {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 9df081ae96e3..e8693fe65e49 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
+#include <linux/memremap.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/genhd.h>
@@ -441,3 +442,183 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 	return rc;
 }
 EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	u64 offset = le64_to_cpu(pfn_sb->dataoff);
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
+	struct vmem_altmap __altmap = {
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
+	};
+
+	memcpy(res, &nsio->res, sizeof(*res));
+	res->start += start_pad;
+	res->end -= end_trunc;
+
+	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+	if (nd_pfn->mode == PFN_MODE_RAM) {
+		if (offset < SZ_8K)
+			return ERR_PTR(-EINVAL);
+		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+		altmap = NULL;
+	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
+		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+			dev_info(&nd_pfn->dev,
+					"number of pfns truncated from %lld to %ld\n",
+					le64_to_cpu(nd_pfn->pfn_sb->npfns),
+					nd_pfn->npfns);
+		memcpy(altmap, &__altmap, sizeof(*altmap));
+		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->alloc = 0;
+	} else
+		return ERR_PTR(-ENXIO);
+
+	return altmap;
+}
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
+	struct nd_region *nd_region;
+	struct nd_pfn_sb *pfn_sb;
+	unsigned long npfns;
+	phys_addr_t offset;
+	u64 checksum;
+	int rc;
+
+	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+	if (!pfn_sb)
+		return -ENOMEM;
+
+	nd_pfn->pfn_sb = pfn_sb;
+	rc = nd_pfn_validate(nd_pfn);
+	if (rc != -ENODEV)
+		return rc;
+
+	/* no info block, do init */;
+	nd_region = to_nd_region(nd_pfn->dev.parent);
+	if (nd_region->ro) {
+		dev_info(&nd_pfn->dev,
+				"%s is read-only, unable to init metadata\n",
+				dev_name(&nd_region->dev));
+		return -ENXIO;
+	}
+
+	memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
+	/*
+	 * Note, we use 64 here for the standard size of struct page,
+	 * debugging options may cause it to be larger in which case the
+	 * implementation will limit the pfns advertised through
+	 * ->direct_access() to those that are included in the memmap.
+	 */
+	start += start_pad;
+	size = resource_size(&nsio->res);
+	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+	if (nd_pfn->mode == PFN_MODE_PMEM)
+		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+			- start;
+	else if (nd_pfn->mode == PFN_MODE_RAM)
+		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
+	else
+		return -ENXIO;
+
+	if (offset + start_pad + end_trunc >= size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		return -ENXIO;
+	}
+
+	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+	pfn_sb->dataoff = cpu_to_le64(offset);
+	pfn_sb->npfns = cpu_to_le64(npfns);
+	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(1);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+	pfn_sb->checksum = cpu_to_le64(checksum);
+
+	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return ERR_PTR(-ENODEV);
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
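The altmap padding arithmetic above is easier to see with concrete numbers. A worked example, assuming x86_64's 128MiB memory sections (0x8000 4K pfns per section) and a hypothetical, non-section-aligned namespace base; both values are illustrative, not from the patch:

	/*
	 * base = 0x106000000            -> base_pfn = 0x106000
	 * init_altmap_base(base)        -> 0x100000 (previous section base)
	 * init_altmap_reserve(base)     -> PHYS_PFN(SZ_8K) + (0x106000 - 0x100000)
	 *                                = 2 + 0x6000 = 0x6002 pfns
	 */

The 2-pfn floor keeps the 8K info-block region out of the page-map allocation, and the remainder pads the reservation back to the previous section base so that the range can be hotplugged at section granularity.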
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b5f81b02205c..3fc68962c1fc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -196,9 +196,6 @@ void pmem_release_disk(void *disk)
 	put_disk(disk);
 }
 
-static struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap);
-
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns)
 {
@@ -310,187 +307,6 @@ static int pmem_attach_disk(struct device *dev,
 	return 0;
 }
 
-static int nd_pfn_init(struct nd_pfn *nd_pfn)
-{
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	u32 start_pad = 0, end_trunc = 0;
-	resource_size_t start, size;
-	struct nd_namespace_io *nsio;
-	struct nd_region *nd_region;
-	struct nd_pfn_sb *pfn_sb;
-	unsigned long npfns;
-	phys_addr_t offset;
-	u64 checksum;
-	int rc;
-
-	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
-	if (!pfn_sb)
-		return -ENOMEM;
-
-	nd_pfn->pfn_sb = pfn_sb;
-	rc = nd_pfn_validate(nd_pfn);
-	if (rc == -ENODEV)
-		/* no info block, do init */;
-	else
-		return rc;
-
-	nd_region = to_nd_region(nd_pfn->dev.parent);
-	if (nd_region->ro) {
-		dev_info(&nd_pfn->dev,
-				"%s is read-only, unable to init metadata\n",
-				dev_name(&nd_region->dev));
-		return -ENXIO;
-	}
-
-	memset(pfn_sb, 0, sizeof(*pfn_sb));
-
-	/*
-	 * Check if pmem collides with 'System RAM' when section aligned and
-	 * trim it accordingly
-	 */
-	nsio = to_nd_namespace_io(&ndns->dev);
-	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-	size = resource_size(&nsio->res);
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-			IORES_DESC_NONE) == REGION_MIXED) {
-
-		start = nsio->res.start;
-		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-	}
-
-	start = nsio->res.start;
-	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-			IORES_DESC_NONE) == REGION_MIXED) {
-		size = resource_size(&nsio->res);
-		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
-	}
-
-	if (start_pad + end_trunc)
-		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
-				dev_name(&ndns->dev), start_pad + end_trunc);
-
-	/*
-	 * Note, we use 64 here for the standard size of struct page,
-	 * debugging options may cause it to be larger in which case the
-	 * implementation will limit the pfns advertised through
-	 * ->direct_access() to those that are included in the memmap.
-	 */
-	start += start_pad;
-	size = resource_size(&nsio->res);
-	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
-	if (nd_pfn->mode == PFN_MODE_PMEM)
-		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
-			- start;
-	else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
-	else
-		return -ENXIO;
-
-	if (offset + start_pad + end_trunc >= size) {
-		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
-				dev_name(&ndns->dev));
-		return -ENXIO;
-	}
-
-	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
-	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
-	pfn_sb->dataoff = cpu_to_le64(offset);
-	pfn_sb->npfns = cpu_to_le64(npfns);
-	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
-	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
-	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
-	pfn_sb->version_major = cpu_to_le16(1);
-	pfn_sb->version_minor = cpu_to_le16(1);
-	pfn_sb->start_pad = cpu_to_le32(start_pad);
-	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
-	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
-	pfn_sb->checksum = cpu_to_le64(checksum);
-
-	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
-}
-
-/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
- */
-static unsigned long init_altmap_base(resource_size_t base)
-{
-	unsigned long base_pfn = PHYS_PFN(base);
-
-	return PFN_SECTION_ALIGN_DOWN(base_pfn);
-}
-
-static unsigned long init_altmap_reserve(resource_size_t base)
-{
-	unsigned long reserve = PHYS_PFN(SZ_8K);
-	unsigned long base_pfn = PHYS_PFN(base);
-
-	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
-	return reserve;
-}
-
-static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap)
-{
-	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-	u64 offset = le64_to_cpu(pfn_sb->dataoff);
-	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
-	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	resource_size_t base = nsio->res.start + start_pad;
-	struct vmem_altmap __altmap = {
-		.base_pfn = init_altmap_base(base),
-		.reserve = init_altmap_reserve(base),
-	};
-
-	memcpy(res, &nsio->res, sizeof(*res));
-	res->start += start_pad;
-	res->end -= end_trunc;
-
-	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
-	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
-			return ERR_PTR(-EINVAL);
-		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
-		altmap = NULL;
-	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
-		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
-			dev_info(&nd_pfn->dev,
-					"number of pfns truncated from %lld to %ld\n",
-					le64_to_cpu(nd_pfn->pfn_sb->npfns),
-					nd_pfn->npfns);
-		memcpy(altmap, &__altmap, sizeof(*altmap));
-		altmap->free = PHYS_PFN(offset - SZ_8K);
-		altmap->alloc = 0;
-	} else
-		return ERR_PTR(-ENXIO);
-
-	return altmap;
-}
-
-/*
- * Determine the effective resource range and vmem_altmap from an nd_pfn
- * instance.
- */
-static struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap)
-{
-	int rc;
-
-	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return ERR_PTR(-ENODEV);
-
-	rc = nd_pfn_init(nd_pfn);
-	if (rc)
-		return ERR_PTR(rc);
-
-	/* we need a valid pfn_sb before we can init a vmem_altmap */
-	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
-}
-
 static int nd_pmem_probe(struct device *dev)
 {
 	struct nd_namespace_common *ndns;