aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/nvdimm/namespace_devs.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2016-10-04 19:09:59 -0400
committerDan Williams <dan.j.williams@intel.com>2016-10-07 12:22:53 -0400
commit762d067dbad5f32560cb1657b7ca20034332dc56 (patch)
tree2d8b4300b38d277dfff626a067d44b1c3cf832b0 /drivers/nvdimm/namespace_devs.c
parent16660eaea0ccc6d0692f173922cd365876eb288e (diff)
libnvdimm, namespace: enable allocation of multiple pmem namespaces
Now that we have nd_region_available_dpa() able to handle the presence of multiple PMEM allocations in aliased PMEM regions, reuse that same infrastructure to track allocations from free space. In particular handle allocating from an aliased PMEM region in the case where there are dis-contiguous holes. The allocation for BLK and PMEM are documented in the space_valid() helper: BLK-space is valid as long as it does not precede a PMEM allocation in a given region. PMEM-space must be contiguous and adjacent to an existing allocation (if one exists). Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/nvdimm/namespace_devs.c')
-rw-r--r--drivers/nvdimm/namespace_devs.c128
1 files changed, 90 insertions, 38 deletions
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 132c5b8b5366..81451c74b01c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -529,19 +529,68 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
529 return rc ? n : 0; 529 return rc ? n : 0;
530} 530}
531 531
532static bool space_valid(bool is_pmem, bool is_reserve, 532
533 struct nd_label_id *label_id, struct resource *res) 533/**
534 * space_valid() - validate free dpa space against constraints
535 * @nd_region: hosting region of the free space
536 * @ndd: dimm device data for debug
537 * @label_id: namespace id to allocate space
538 * @prev: potential allocation that precedes free space
539 * @next: allocation that follows the given free space range
540 * @exist: first allocation with same id in the mapping
541 * @n: range that must be satisfied for pmem allocations
542 * @valid: free space range to validate
543 *
544 * BLK-space is valid as long as it does not precede a PMEM
545 * allocation in a given region. PMEM-space must be contiguous
546 * and adjacent to an existing allocation (if one
547 * exists). If reserving PMEM any space is valid.
548 */
549static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
550 struct nd_label_id *label_id, struct resource *prev,
551 struct resource *next, struct resource *exist,
552 resource_size_t n, struct resource *valid)
534{ 553{
535 /* 554 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
536 * For BLK-space any space is valid, for PMEM-space, it must be 555 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
537 * contiguous with an existing allocation unless we are 556
538 * reserving pmem. 557 if (valid->start >= valid->end)
539 */ 558 goto invalid;
540 if (is_reserve || !is_pmem) 559
541 return true; 560 if (is_reserve)
542 if (!res || strcmp(res->name, label_id->id) == 0) 561 return;
543 return true; 562
544 return false; 563 if (!is_pmem) {
564 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
565 struct nvdimm_bus *nvdimm_bus;
566 struct blk_alloc_info info = {
567 .nd_mapping = nd_mapping,
568 .available = nd_mapping->size,
569 .res = valid,
570 };
571
572 WARN_ON(!is_nd_blk(&nd_region->dev));
573 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
574 device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
575 return;
576 }
577
578 /* allocation needs to be contiguous, so this is all or nothing */
579 if (resource_size(valid) < n)
580 goto invalid;
581
582 /* we've got all the space we need and no existing allocation */
583 if (!exist)
584 return;
585
586 /* allocation needs to be contiguous with the existing namespace */
587 if (valid->start == exist->end + 1
588 || valid->end == exist->start - 1)
589 return;
590
591 invalid:
592 /* truncate @valid size to 0 */
593 valid->end = valid->start - 1;
545} 594}
546 595
547enum alloc_loc { 596enum alloc_loc {
@@ -553,18 +602,24 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
553 resource_size_t n) 602 resource_size_t n)
554{ 603{
555 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; 604 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
556 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
557 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; 605 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
558 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 606 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
607 struct resource *res, *exist = NULL, valid;
559 const resource_size_t to_allocate = n; 608 const resource_size_t to_allocate = n;
560 struct resource *res;
561 int first; 609 int first;
562 610
611 for_each_dpa_resource(ndd, res)
612 if (strcmp(label_id->id, res->name) == 0)
613 exist = res;
614
615 valid.start = nd_mapping->start;
616 valid.end = mapping_end;
617 valid.name = "free space";
563 retry: 618 retry:
564 first = 0; 619 first = 0;
565 for_each_dpa_resource(ndd, res) { 620 for_each_dpa_resource(ndd, res) {
566 resource_size_t allocate, available = 0, free_start, free_end;
567 struct resource *next = res->sibling, *new_res = NULL; 621 struct resource *next = res->sibling, *new_res = NULL;
622 resource_size_t allocate, available = 0;
568 enum alloc_loc loc = ALLOC_ERR; 623 enum alloc_loc loc = ALLOC_ERR;
569 const char *action; 624 const char *action;
570 int rc = 0; 625 int rc = 0;
@@ -577,32 +632,35 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
577 632
578 /* space at the beginning of the mapping */ 633 /* space at the beginning of the mapping */
579 if (!first++ && res->start > nd_mapping->start) { 634 if (!first++ && res->start > nd_mapping->start) {
580 free_start = nd_mapping->start; 635 valid.start = nd_mapping->start;
581 available = res->start - free_start; 636 valid.end = res->start - 1;
582 if (space_valid(is_pmem, is_reserve, label_id, NULL)) 637 space_valid(nd_region, ndd, label_id, NULL, next, exist,
638 to_allocate, &valid);
639 available = resource_size(&valid);
640 if (available)
583 loc = ALLOC_BEFORE; 641 loc = ALLOC_BEFORE;
584 } 642 }
585 643
586 /* space between allocations */ 644 /* space between allocations */
587 if (!loc && next) { 645 if (!loc && next) {
588 free_start = res->start + resource_size(res); 646 valid.start = res->start + resource_size(res);
589 free_end = min(mapping_end, next->start - 1); 647 valid.end = min(mapping_end, next->start - 1);
590 if (space_valid(is_pmem, is_reserve, label_id, res) 648 space_valid(nd_region, ndd, label_id, res, next, exist,
591 && free_start < free_end) { 649 to_allocate, &valid);
592 available = free_end + 1 - free_start; 650 available = resource_size(&valid);
651 if (available)
593 loc = ALLOC_MID; 652 loc = ALLOC_MID;
594 }
595 } 653 }
596 654
597 /* space at the end of the mapping */ 655 /* space at the end of the mapping */
598 if (!loc && !next) { 656 if (!loc && !next) {
599 free_start = res->start + resource_size(res); 657 valid.start = res->start + resource_size(res);
600 free_end = mapping_end; 658 valid.end = mapping_end;
601 if (space_valid(is_pmem, is_reserve, label_id, res) 659 space_valid(nd_region, ndd, label_id, res, next, exist,
602 && free_start < free_end) { 660 to_allocate, &valid);
603 available = free_end + 1 - free_start; 661 available = resource_size(&valid);
662 if (available)
604 loc = ALLOC_AFTER; 663 loc = ALLOC_AFTER;
605 }
606 } 664 }
607 665
608 if (!loc || !available) 666 if (!loc || !available)
@@ -612,8 +670,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
612 case ALLOC_BEFORE: 670 case ALLOC_BEFORE:
613 if (strcmp(res->name, label_id->id) == 0) { 671 if (strcmp(res->name, label_id->id) == 0) {
614 /* adjust current resource up */ 672 /* adjust current resource up */
615 if (is_pmem && !is_reserve)
616 return n;
617 rc = adjust_resource(res, res->start - allocate, 673 rc = adjust_resource(res, res->start - allocate,
618 resource_size(res) + allocate); 674 resource_size(res) + allocate);
619 action = "cur grow up"; 675 action = "cur grow up";
@@ -623,8 +679,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
623 case ALLOC_MID: 679 case ALLOC_MID:
624 if (strcmp(next->name, label_id->id) == 0) { 680 if (strcmp(next->name, label_id->id) == 0) {
625 /* adjust next resource up */ 681 /* adjust next resource up */
626 if (is_pmem && !is_reserve)
627 return n;
628 rc = adjust_resource(next, next->start 682 rc = adjust_resource(next, next->start
629 - allocate, resource_size(next) 683 - allocate, resource_size(next)
630 + allocate); 684 + allocate);
@@ -648,12 +702,10 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
648 if (strcmp(action, "allocate") == 0) { 702 if (strcmp(action, "allocate") == 0) {
649 /* BLK allocate bottom up */ 703 /* BLK allocate bottom up */
650 if (!is_pmem) 704 if (!is_pmem)
651 free_start += available - allocate; 705 valid.start += available - allocate;
652 else if (!is_reserve && free_start != nd_mapping->start)
653 return n;
654 706
655 new_res = nvdimm_allocate_dpa(ndd, label_id, 707 new_res = nvdimm_allocate_dpa(ndd, label_id,
656 free_start, allocate); 708 valid.start, allocate);
657 if (!new_res) 709 if (!new_res)
658 rc = -EBUSY; 710 rc = -EBUSY;
659 } else if (strcmp(action, "grow down") == 0) { 711 } else if (strcmp(action, "grow down") == 0) {