 drivers/nvdimm/dimm_devs.c      |  32 +++++--
 drivers/nvdimm/namespace_devs.c | 128 ++++++++++-----
 drivers/nvdimm/nd-core.h        |  18 ++++
 3 files changed, 133 insertions(+), 45 deletions(-)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 4b0296ccb375..d614493ad5ac 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -386,13 +386,7 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
 }
 EXPORT_SYMBOL_GPL(nvdimm_create);
 
-struct blk_alloc_info {
-        struct nd_mapping *nd_mapping;
-        resource_size_t available, busy;
-        struct resource *res;
-};
-
-static int alias_dpa_busy(struct device *dev, void *data)
+int alias_dpa_busy(struct device *dev, void *data)
 {
         resource_size_t map_end, blk_start, new, busy;
         struct blk_alloc_info *info = data;
@@ -418,6 +412,20 @@ static int alias_dpa_busy(struct device *dev, void *data)
         ndd = to_ndd(nd_mapping);
         map_end = nd_mapping->start + nd_mapping->size - 1;
         blk_start = nd_mapping->start;
+
+        /*
+         * In the allocation case ->res is set to free space that we are
+         * looking to validate against PMEM aliasing collision rules
+         * (i.e. BLK is allocated after all aliased PMEM).
+         */
+        if (info->res) {
+                if (info->res->start >= nd_mapping->start
+                                && info->res->start < map_end)
+                        /* pass */;
+                else
+                        return 0;
+        }
+
  retry:
         /*
          * Find the free dpa from the end of the last pmem allocation to
@@ -447,7 +455,16 @@ static int alias_dpa_busy(struct device *dev, void *data)
                 }
         }
 
+        /* update the free space range with the probed blk_start */
+        if (info->res && blk_start > info->res->start) {
+                info->res->start = max(info->res->start, blk_start);
+                if (info->res->start > info->res->end)
+                        info->res->end = info->res->start - 1;
+                return 1;
+        }
+
         info->available -= blk_start - nd_mapping->start + busy;
+
         return 0;
 }
 
@@ -508,6 +525,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
         struct blk_alloc_info info = {
                 .nd_mapping = nd_mapping,
                 .available = nd_mapping->size,
+                .res = NULL,
         };
         struct resource *res;
 
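
The ->res handling added to alias_dpa_busy() above can be pictured with a short userspace sketch. The names below (struct range, trim_to_blk_start) are hypothetical stand-ins rather than the kernel types: a candidate free range is pushed up to the probed blk_start (the first DPA past all aliased PMEM) and collapses to zero size when nothing survives.

/*
 * Illustrative sketch only, not kernel code: models the truncation that
 * alias_dpa_busy() applies to info->res.
 */
#include <stdio.h>

struct range {
        unsigned long long start;
        unsigned long long end;         /* inclusive; end < start means empty */
};

static unsigned long long range_size(const struct range *r)
{
        return r->end < r->start ? 0 : r->end - r->start + 1;
}

/* Push the free range up to blk_start; collapse it if nothing remains. */
static void trim_to_blk_start(struct range *res, unsigned long long blk_start)
{
        if (blk_start <= res->start)
                return;                 /* range already starts past the aliased PMEM */
        res->start = blk_start;
        if (res->start > res->end)
                res->end = res->start - 1;      /* size 0, mirroring the kernel's truncation */
}

int main(void)
{
        struct range free_space = { .start = 0x1000, .end = 0x7fff };

        /* aliased PMEM assumed to occupy everything below 0x4000 */
        trim_to_blk_start(&free_space, 0x4000);
        printf("start=%#llx size=%#llx\n", free_space.start, range_size(&free_space));

        /* a second, larger collision consumes the rest of the range */
        trim_to_blk_start(&free_space, 0x9000);
        printf("size after full collision: %llu\n", range_size(&free_space));
        return 0;
}
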
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 132c5b8b5366..81451c74b01c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -529,19 +529,68 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
         return rc ? n : 0;
 }
 
-static bool space_valid(bool is_pmem, bool is_reserve,
-                struct nd_label_id *label_id, struct resource *res)
+
+/**
+ * space_valid() - validate free dpa space against constraints
+ * @nd_region: hosting region of the free space
+ * @ndd: dimm device data for debug
+ * @label_id: namespace id to allocate space
+ * @prev: potential allocation that precedes free space
+ * @next: allocation that follows the given free space range
+ * @exist: first allocation with same id in the mapping
+ * @n: range that must be satisfied for pmem allocations
+ * @valid: free space range to validate
+ *
+ * BLK-space is valid as long as it does not precede a PMEM
+ * allocation in a given region. PMEM-space must be contiguous
+ * and adjacent to an existing allocation (if one exists).
+ * If reserving PMEM any space is valid.
+ */
+static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
+                struct nd_label_id *label_id, struct resource *prev,
+                struct resource *next, struct resource *exist,
+                resource_size_t n, struct resource *valid)
 {
-        /*
-         * For BLK-space any space is valid, for PMEM-space, it must be
-         * contiguous with an existing allocation unless we are
-         * reserving pmem.
-         */
-        if (is_reserve || !is_pmem)
-                return true;
-        if (!res || strcmp(res->name, label_id->id) == 0)
-                return true;
-        return false;
+        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
+        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
+
+        if (valid->start >= valid->end)
+                goto invalid;
+
+        if (is_reserve)
+                return;
+
+        if (!is_pmem) {
+                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+                struct nvdimm_bus *nvdimm_bus;
+                struct blk_alloc_info info = {
+                        .nd_mapping = nd_mapping,
+                        .available = nd_mapping->size,
+                        .res = valid,
+                };
+
+                WARN_ON(!is_nd_blk(&nd_region->dev));
+                nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+                device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
+                return;
+        }
+
+        /* allocation needs to be contiguous, so this is all or nothing */
+        if (resource_size(valid) < n)
+                goto invalid;
+
+        /* we've got all the space we need and no existing allocation */
+        if (!exist)
+                return;
+
+        /* allocation needs to be contiguous with the existing namespace */
+        if (valid->start == exist->end + 1
+                        || valid->end == exist->start - 1)
+                return;
+
+ invalid:
+        /* truncate @valid size to 0 */
+        valid->end = valid->start - 1;
 }
 
 enum alloc_loc {
@@ -553,18 +602,24 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                 resource_size_t n)
 {
         resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
-        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
         bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+        struct resource *res, *exist = NULL, valid;
         const resource_size_t to_allocate = n;
-        struct resource *res;
         int first;
 
+        for_each_dpa_resource(ndd, res)
+                if (strcmp(label_id->id, res->name) == 0)
+                        exist = res;
+
+        valid.start = nd_mapping->start;
+        valid.end = mapping_end;
+        valid.name = "free space";
  retry:
         first = 0;
         for_each_dpa_resource(ndd, res) {
-                resource_size_t allocate, available = 0, free_start, free_end;
                 struct resource *next = res->sibling, *new_res = NULL;
+                resource_size_t allocate, available = 0;
                 enum alloc_loc loc = ALLOC_ERR;
                 const char *action;
                 int rc = 0;
@@ -577,32 +632,35 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
 
                 /* space at the beginning of the mapping */
                 if (!first++ && res->start > nd_mapping->start) {
-                        free_start = nd_mapping->start;
-                        available = res->start - free_start;
-                        if (space_valid(is_pmem, is_reserve, label_id, NULL))
+                        valid.start = nd_mapping->start;
+                        valid.end = res->start - 1;
+                        space_valid(nd_region, ndd, label_id, NULL, next, exist,
+                                        to_allocate, &valid);
+                        available = resource_size(&valid);
+                        if (available)
                                 loc = ALLOC_BEFORE;
                 }
 
                 /* space between allocations */
                 if (!loc && next) {
-                        free_start = res->start + resource_size(res);
-                        free_end = min(mapping_end, next->start - 1);
-                        if (space_valid(is_pmem, is_reserve, label_id, res)
-                                        && free_start < free_end) {
-                                available = free_end + 1 - free_start;
+                        valid.start = res->start + resource_size(res);
+                        valid.end = min(mapping_end, next->start - 1);
+                        space_valid(nd_region, ndd, label_id, res, next, exist,
+                                        to_allocate, &valid);
+                        available = resource_size(&valid);
+                        if (available)
                                 loc = ALLOC_MID;
-                        }
                 }
 
                 /* space at the end of the mapping */
                 if (!loc && !next) {
-                        free_start = res->start + resource_size(res);
-                        free_end = mapping_end;
-                        if (space_valid(is_pmem, is_reserve, label_id, res)
-                                        && free_start < free_end) {
-                                available = free_end + 1 - free_start;
+                        valid.start = res->start + resource_size(res);
+                        valid.end = mapping_end;
+                        space_valid(nd_region, ndd, label_id, res, next, exist,
+                                        to_allocate, &valid);
+                        available = resource_size(&valid);
+                        if (available)
                                 loc = ALLOC_AFTER;
-                        }
                 }
 
                 if (!loc || !available)
@@ -612,8 +670,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                 case ALLOC_BEFORE:
                         if (strcmp(res->name, label_id->id) == 0) {
                                 /* adjust current resource up */
-                                if (is_pmem && !is_reserve)
-                                        return n;
                                 rc = adjust_resource(res, res->start - allocate,
                                                 resource_size(res) + allocate);
                                 action = "cur grow up";
@@ -623,8 +679,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                 case ALLOC_MID:
                         if (strcmp(next->name, label_id->id) == 0) {
                                 /* adjust next resource up */
-                                if (is_pmem && !is_reserve)
-                                        return n;
                                 rc = adjust_resource(next, next->start
                                                 - allocate, resource_size(next)
                                                 + allocate);
@@ -648,12 +702,10 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
                 if (strcmp(action, "allocate") == 0) {
                         /* BLK allocate bottom up */
                         if (!is_pmem)
-                                free_start += available - allocate;
-                        else if (!is_reserve && free_start != nd_mapping->start)
-                                return n;
+                                valid.start += available - allocate;
 
                         new_res = nvdimm_allocate_dpa(ndd, label_id,
-                                        free_start, allocate);
+                                        valid.start, allocate);
                         if (!new_res)
                                 rc = -EBUSY;
                 } else if (strcmp(action, "grow down") == 0) {
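
space_valid() now returns nothing; an unusable candidate is signalled by truncating the range to zero size (end = start - 1) so scan_allocate() can test resource_size(&valid) the same way in every branch. A minimal userspace model of the PMEM rules, using hypothetical names (struct range, pmem_space_ok) rather than the kernel API:

/*
 * Illustrative sketch only: simplified model of the PMEM branch of
 * space_valid(). An invalid range is reported by collapsing it to size 0.
 */
#include <stdio.h>

struct range {
        unsigned long long start, end;  /* inclusive; end < start means empty */
};

static unsigned long long range_size(const struct range *r)
{
        return r->end < r->start ? 0 : r->end - r->start + 1;
}

static void invalidate(struct range *valid)
{
        valid->end = valid->start - 1;  /* truncate to size 0 */
}

/*
 * PMEM needs the full request in one contiguous piece, and if an
 * allocation with the same label already exists the free range must be
 * immediately adjacent to it.
 */
static void pmem_space_ok(const struct range *exist, unsigned long long n,
                struct range *valid)
{
        /* all or nothing: the request must fit this one free range */
        if (range_size(valid) < n) {
                invalidate(valid);
                return;
        }
        /* no prior allocation with this label: any big-enough range works */
        if (!exist)
                return;
        /* otherwise the range must butt up against the existing allocation */
        if (valid->start == exist->end + 1 || valid->end == exist->start - 1)
                return;
        invalidate(valid);
}

int main(void)
{
        struct range exist = { .start = 0x1000, .end = 0x1fff };
        struct range valid = { .start = 0x3000, .end = 0x4fff };

        /* big enough, but not adjacent to the existing allocation */
        pmem_space_ok(&exist, 0x1000, &valid);
        printf("available = %llu\n", range_size(&valid));      /* prints 0 */
        return 0;
}
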
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 7c2196a1d56f..3ba0b96ce7de 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -44,6 +44,23 @@ struct nvdimm {
         struct resource *flush_wpq;
 };
 
+/**
+ * struct blk_alloc_info - tracking info for BLK dpa scanning
+ * @nd_mapping: blk region mapping boundaries
+ * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
+ * @busy: decremented in blk_dpa_busy to account for ranges already
+ *        handled by alias_dpa_busy
+ * @res: alias_dpa_busy interprets this as a free space range that needs
+ *       to be truncated to the valid BLK allocation starting DPA,
+ *       blk_dpa_busy treats it as a busy range that needs the aliased
+ *       PMEM ranges truncated.
+ */
+struct blk_alloc_info {
+        struct nd_mapping *nd_mapping;
+        resource_size_t available, busy;
+        struct resource *res;
+};
+
 bool is_nvdimm(struct device *dev);
 bool is_nd_pmem(struct device *dev);
 bool is_nd_blk(struct device *dev);
@@ -80,6 +97,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                 struct nd_label_id *label_id);
+int alias_dpa_busy(struct device *dev, void *data);
 struct resource *nsblk_add_resource(struct nd_region *nd_region,
                 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
                 resource_size_t start);
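
The blk_alloc_info kernel-doc above describes two uses of the same walk: with ->res == NULL the scan only debits ->available (as nd_blk_available_dpa() does), while with ->res pointing at a candidate range the scan trims that range (as the BLK branch of space_valid() does). A compressed userspace sketch of the two modes, with hypothetical names and simplified arithmetic (mapping assumed to start at 0, busy ranges ignored):

/* Illustrative sketch only, not the kernel implementation. */
#include <stddef.h>
#include <stdio.h>

struct range {
        unsigned long long start, end;  /* inclusive */
};

struct scan_info {
        unsigned long long available;   /* accounting-mode result */
        struct range *res;              /* optional candidate free range */
};

/* One aliased-PMEM region ending at pmem_end pushes blk_start upward. */
static int scan_one(unsigned long long pmem_end, struct scan_info *info)
{
        unsigned long long blk_start = pmem_end + 1;

        if (info->res) {
                /* trimming mode: candidate BLK space must start after PMEM */
                if (blk_start > info->res->start) {
                        info->res->start = blk_start;
                        if (info->res->start > info->res->end)
                                info->res->end = info->res->start - 1;
                        return 1;       /* stop the walk, like alias_dpa_busy */
                }
                return 0;
        }
        /* accounting mode: debit the space consumed below blk_start */
        info->available -= blk_start;
        return 0;
}

int main(void)
{
        struct range candidate = { .start = 0x0, .end = 0x3fff };
        struct scan_info account = { .available = 0x10000, .res = NULL };
        struct scan_info trim = { .available = 0x10000, .res = &candidate };

        scan_one(0x1fff, &account);
        scan_one(0x1fff, &trim);
        printf("available=%#llx candidate start=%#llx\n",
                        account.available, candidate.start);
        return 0;
}
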