aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/pmem/pmem-region.txt65
-rw-r--r--MAINTAINERS8
-rw-r--r--arch/powerpc/platforms/powernv/opal.c3
-rw-r--r--drivers/acpi/nfit/core.c679
-rw-r--r--drivers/acpi/nfit/mce.c5
-rw-r--r--drivers/acpi/nfit/nfit.h22
-rw-r--r--drivers/dax/device.c38
-rw-r--r--drivers/dax/pmem.c18
-rw-r--r--drivers/nvdimm/Kconfig11
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/btt_devs.c21
-rw-r--r--drivers/nvdimm/bus.c14
-rw-r--r--drivers/nvdimm/claim.c2
-rw-r--r--drivers/nvdimm/core.c6
-rw-r--r--drivers/nvdimm/dax_devs.c5
-rw-r--r--drivers/nvdimm/dimm.c8
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/label.c85
-rw-r--r--drivers/nvdimm/label.h2
-rw-r--r--drivers/nvdimm/namespace_devs.c42
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvdimm/of_pmem.c119
-rw-r--r--drivers/nvdimm/pfn_devs.c25
-rw-r--r--drivers/nvdimm/pmem.c14
-rw-r--r--drivers/nvdimm/region.c4
-rw-r--r--drivers/nvdimm/region_devs.c9
-rw-r--r--include/linux/libnvdimm.h4
-rw-r--r--include/linux/nd.h6
-rw-r--r--tools/testing/nvdimm/test/nfit.c239
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h16
30 files changed, 876 insertions, 603 deletions
diff --git a/Documentation/devicetree/bindings/pmem/pmem-region.txt b/Documentation/devicetree/bindings/pmem/pmem-region.txt
new file mode 100644
index 000000000000..5cfa4f016a00
--- /dev/null
+++ b/Documentation/devicetree/bindings/pmem/pmem-region.txt
@@ -0,0 +1,65 @@
1Device-tree bindings for persistent memory regions
2-----------------------------------------------------
3
4Persistent memory refers to a class of memory devices that are:
5
6 a) Usable as main system memory (i.e. cacheable), and
7 b) Retain their contents across power failure.
8
9Given b) it is best to think of persistent memory as a kind of memory mapped
10storage device. To ensure data integrity the operating system needs to manage
11persistent regions separately to the normal memory pool. To aid with that this
12binding provides a standardised interface for discovering where persistent
13memory regions exist inside the physical address space.
14
15Bindings for the region nodes:
16-----------------------------
17
18Required properties:
19 - compatible = "pmem-region"
20
21 - reg = <base, size>;
 22	The reg property should specify an address range that is
23 translatable to a system physical address range. This address
24 range should be mappable as normal system memory would be
 25	(i.e. cacheable).
26
27 If the reg property contains multiple address ranges
28 each address range will be treated as though it was specified
29 in a separate device node. Having multiple address ranges in a
30 node implies no special relationship between the two ranges.
31
32Optional properties:
 33	- Any relevant NUMA associativity properties for the target platform.
34
35 - volatile; This property indicates that this region is actually
36 backed by non-persistent memory. This lets the OS know that it
37 may skip the cache flushes required to ensure data is made
38 persistent after a write.
39
40 If this property is absent then the OS must assume that the region
41 is backed by non-volatile memory.
42
43Examples:
44--------------------
45
46 /*
47 * This node specifies one 4KB region spanning from
48 * 0x5000 to 0x5fff that is backed by non-volatile memory.
49 */
50 pmem@5000 {
51 compatible = "pmem-region";
52 reg = <0x00005000 0x00001000>;
53 };
54
55 /*
56 * This node specifies two 4KB regions that are backed by
57 * volatile (normal) memory.
58 */
59 pmem@6000 {
60 compatible = "pmem-region";
61 reg = < 0x00006000 0x00001000
62 0x00008000 0x00001000 >;
63 volatile;
64 };
65
diff --git a/MAINTAINERS b/MAINTAINERS
index 73c0cdabf755..821700479d6b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8035,6 +8035,14 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8035S: Supported 8035S: Supported
8036F: drivers/nvdimm/pmem* 8036F: drivers/nvdimm/pmem*
8037 8037
8038LIBNVDIMM: DEVICETREE BINDINGS
8039M: Oliver O'Halloran <oohall@gmail.com>
8040L: linux-nvdimm@lists.01.org
8041Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8042S: Supported
8043F: drivers/nvdimm/of_pmem.c
8044F: Documentation/devicetree/bindings/pmem/pmem-region.txt
8045
8038LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM 8046LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
8039M: Dan Williams <dan.j.williams@intel.com> 8047M: Dan Williams <dan.j.williams@intel.com>
8040L: linux-nvdimm@lists.01.org 8048L: linux-nvdimm@lists.01.org
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index c15182765ff5..c37485a3c5c9 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -821,6 +821,9 @@ static int __init opal_init(void)
821 /* Create i2c platform devices */ 821 /* Create i2c platform devices */
822 opal_pdev_init("ibm,opal-i2c"); 822 opal_pdev_init("ibm,opal-i2c");
823 823
824 /* Handle non-volatile memory devices */
825 opal_pdev_init("pmem-region");
826
824 /* Setup a heatbeat thread if requested by OPAL */ 827 /* Setup a heatbeat thread if requested by OPAL */
825 opal_init_heartbeat(); 828 opal_init_heartbeat();
826 829
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index eb09ef55c38a..dbe43503cba5 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -35,16 +35,6 @@ static bool force_enable_dimms;
35module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 35module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
36MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 36MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
37 37
38static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
39module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
40MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
41
42/* after three payloads of overflow, it's dead jim */
43static unsigned int scrub_overflow_abort = 3;
44module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(scrub_overflow_abort,
46 "Number of times we overflow ARS results before abort");
47
48static bool disable_vendor_specific; 38static bool disable_vendor_specific;
49module_param(disable_vendor_specific, bool, S_IRUGO); 39module_param(disable_vendor_specific, bool, S_IRUGO);
50MODULE_PARM_DESC(disable_vendor_specific, 40MODULE_PARM_DESC(disable_vendor_specific,
@@ -59,6 +49,10 @@ module_param(default_dsm_family, int, S_IRUGO);
59MODULE_PARM_DESC(default_dsm_family, 49MODULE_PARM_DESC(default_dsm_family,
60 "Try this DSM type first when identifying NVDIMM family"); 50 "Try this DSM type first when identifying NVDIMM family");
61 51
52static bool no_init_ars;
53module_param(no_init_ars, bool, 0644);
54MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
55
62LIST_HEAD(acpi_descs); 56LIST_HEAD(acpi_descs);
63DEFINE_MUTEX(acpi_desc_lock); 57DEFINE_MUTEX(acpi_desc_lock);
64 58
@@ -196,7 +190,7 @@ static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd
196 * In the _LSI, _LSR, _LSW case the locked status is 190 * In the _LSI, _LSR, _LSW case the locked status is
197 * communicated via the read/write commands 191 * communicated via the read/write commands
198 */ 192 */
199 if (nfit_mem->has_lsi) 193 if (nfit_mem->has_lsr)
200 break; 194 break;
201 195
202 if (status >> 16 & ND_CONFIG_LOCKED) 196 if (status >> 16 & ND_CONFIG_LOCKED)
@@ -476,14 +470,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
476 in_buf.buffer.length = call_pkg->nd_size_in; 470 in_buf.buffer.length = call_pkg->nd_size_in;
477 } 471 }
478 472
479 dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", 473 dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
480 __func__, dimm_name, cmd, func, in_buf.buffer.length); 474 dimm_name, cmd, func, in_buf.buffer.length);
481 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, 475 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
482 in_buf.buffer.pointer, 476 in_buf.buffer.pointer,
483 min_t(u32, 256, in_buf.buffer.length), true); 477 min_t(u32, 256, in_buf.buffer.length), true);
484 478
485 /* call the BIOS, prefer the named methods over _DSM if available */ 479 /* call the BIOS, prefer the named methods over _DSM if available */
486 if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi) 480 if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
487 out_obj = acpi_label_info(handle); 481 out_obj = acpi_label_info(handle);
488 else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) { 482 else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
489 struct nd_cmd_get_config_data_hdr *p = buf; 483 struct nd_cmd_get_config_data_hdr *p = buf;
@@ -506,8 +500,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
506 } 500 }
507 501
508 if (!out_obj) { 502 if (!out_obj) {
509 dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, 503 dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
510 cmd_name);
511 return -EINVAL; 504 return -EINVAL;
512 } 505 }
513 506
@@ -528,13 +521,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
528 } 521 }
529 522
530 if (out_obj->package.type != ACPI_TYPE_BUFFER) { 523 if (out_obj->package.type != ACPI_TYPE_BUFFER) {
531 dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", 524 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
532 __func__, dimm_name, cmd_name, out_obj->type); 525 dimm_name, cmd_name, out_obj->type);
533 rc = -EINVAL; 526 rc = -EINVAL;
534 goto out; 527 goto out;
535 } 528 }
536 529
537 dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name, 530 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
538 cmd_name, out_obj->buffer.length); 531 cmd_name, out_obj->buffer.length);
539 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, 532 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
540 out_obj->buffer.pointer, 533 out_obj->buffer.pointer,
@@ -546,14 +539,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
546 out_obj->buffer.length - offset); 539 out_obj->buffer.length - offset);
547 540
548 if (offset + out_size > out_obj->buffer.length) { 541 if (offset + out_size > out_obj->buffer.length) {
549 dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", 542 dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
550 __func__, dimm_name, cmd_name, i); 543 dimm_name, cmd_name, i);
551 break; 544 break;
552 } 545 }
553 546
554 if (in_buf.buffer.length + offset + out_size > buf_len) { 547 if (in_buf.buffer.length + offset + out_size > buf_len) {
555 dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", 548 dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
556 __func__, dimm_name, cmd_name, i); 549 dimm_name, cmd_name, i);
557 rc = -ENXIO; 550 rc = -ENXIO;
558 goto out; 551 goto out;
559 } 552 }
@@ -655,7 +648,7 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
655 INIT_LIST_HEAD(&nfit_spa->list); 648 INIT_LIST_HEAD(&nfit_spa->list);
656 memcpy(nfit_spa->spa, spa, sizeof(*spa)); 649 memcpy(nfit_spa->spa, spa, sizeof(*spa));
657 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 650 list_add_tail(&nfit_spa->list, &acpi_desc->spas);
658 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, 651 dev_dbg(dev, "spa index: %d type: %s\n",
659 spa->range_index, 652 spa->range_index,
660 spa_type_name(nfit_spa_type(spa))); 653 spa_type_name(nfit_spa_type(spa)));
661 return true; 654 return true;
@@ -684,8 +677,8 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
684 INIT_LIST_HEAD(&nfit_memdev->list); 677 INIT_LIST_HEAD(&nfit_memdev->list);
685 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); 678 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
686 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 679 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
687 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n", 680 dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
688 __func__, memdev->device_handle, memdev->range_index, 681 memdev->device_handle, memdev->range_index,
689 memdev->region_index, memdev->flags); 682 memdev->region_index, memdev->flags);
690 return true; 683 return true;
691} 684}
@@ -727,7 +720,7 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
727 INIT_LIST_HEAD(&nfit_dcr->list); 720 INIT_LIST_HEAD(&nfit_dcr->list);
728 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); 721 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
729 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 722 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
730 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, 723 dev_dbg(dev, "dcr index: %d windows: %d\n",
731 dcr->region_index, dcr->windows); 724 dcr->region_index, dcr->windows);
732 return true; 725 return true;
733} 726}
@@ -754,7 +747,7 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
754 INIT_LIST_HEAD(&nfit_bdw->list); 747 INIT_LIST_HEAD(&nfit_bdw->list);
755 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); 748 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
756 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 749 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
757 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, 750 dev_dbg(dev, "bdw dcr: %d windows: %d\n",
758 bdw->region_index, bdw->windows); 751 bdw->region_index, bdw->windows);
759 return true; 752 return true;
760} 753}
@@ -793,7 +786,7 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
793 INIT_LIST_HEAD(&nfit_idt->list); 786 INIT_LIST_HEAD(&nfit_idt->list);
794 memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); 787 memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
795 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 788 list_add_tail(&nfit_idt->list, &acpi_desc->idts);
796 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, 789 dev_dbg(dev, "idt index: %d num_lines: %d\n",
797 idt->interleave_index, idt->line_count); 790 idt->interleave_index, idt->line_count);
798 return true; 791 return true;
799} 792}
@@ -833,7 +826,7 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
833 INIT_LIST_HEAD(&nfit_flush->list); 826 INIT_LIST_HEAD(&nfit_flush->list);
834 memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); 827 memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
835 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 828 list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
836 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, 829 dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
837 flush->device_handle, flush->hint_count); 830 flush->device_handle, flush->hint_count);
838 return true; 831 return true;
839} 832}
@@ -846,7 +839,7 @@ static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
846 839
847 mask = (1 << (pcap->highest_capability + 1)) - 1; 840 mask = (1 << (pcap->highest_capability + 1)) - 1;
848 acpi_desc->platform_cap = pcap->capabilities & mask; 841 acpi_desc->platform_cap = pcap->capabilities & mask;
849 dev_dbg(dev, "%s: cap: %#x\n", __func__, acpi_desc->platform_cap); 842 dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
850 return true; 843 return true;
851} 844}
852 845
@@ -893,7 +886,7 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc,
893 return err; 886 return err;
894 break; 887 break;
895 case ACPI_NFIT_TYPE_SMBIOS: 888 case ACPI_NFIT_TYPE_SMBIOS:
896 dev_dbg(dev, "%s: smbios\n", __func__); 889 dev_dbg(dev, "smbios\n");
897 break; 890 break;
898 case ACPI_NFIT_TYPE_CAPABILITIES: 891 case ACPI_NFIT_TYPE_CAPABILITIES:
899 if (!add_platform_cap(acpi_desc, table)) 892 if (!add_platform_cap(acpi_desc, table))
@@ -1250,8 +1243,11 @@ static ssize_t scrub_show(struct device *dev,
1250 if (nd_desc) { 1243 if (nd_desc) {
1251 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1244 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1252 1245
1246 mutex_lock(&acpi_desc->init_mutex);
1253 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, 1247 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
1254 (work_busy(&acpi_desc->work)) ? "+\n" : "\n"); 1248 work_busy(&acpi_desc->dwork.work)
1249 && !acpi_desc->cancel ? "+\n" : "\n");
1250 mutex_unlock(&acpi_desc->init_mutex);
1255 } 1251 }
1256 device_unlock(dev); 1252 device_unlock(dev);
1257 return rc; 1253 return rc;
@@ -1621,7 +1617,7 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event)
1621 struct nfit_mem *nfit_mem; 1617 struct nfit_mem *nfit_mem;
1622 struct acpi_nfit_desc *acpi_desc; 1618 struct acpi_nfit_desc *acpi_desc;
1623 1619
1624 dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__, 1620 dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
1625 event); 1621 event);
1626 1622
1627 if (event != NFIT_NOTIFY_DIMM_HEALTH) { 1623 if (event != NFIT_NOTIFY_DIMM_HEALTH) {
@@ -1654,12 +1650,23 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1654 device_unlock(dev->parent); 1650 device_unlock(dev->parent);
1655} 1651}
1656 1652
1653static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1654{
1655 acpi_handle handle;
1656 acpi_status status;
1657
1658 status = acpi_get_handle(adev->handle, method, &handle);
1659
1660 if (ACPI_SUCCESS(status))
1661 return true;
1662 return false;
1663}
1664
1657static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 1665static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1658 struct nfit_mem *nfit_mem, u32 device_handle) 1666 struct nfit_mem *nfit_mem, u32 device_handle)
1659{ 1667{
1660 struct acpi_device *adev, *adev_dimm; 1668 struct acpi_device *adev, *adev_dimm;
1661 struct device *dev = acpi_desc->dev; 1669 struct device *dev = acpi_desc->dev;
1662 union acpi_object *obj;
1663 unsigned long dsm_mask; 1670 unsigned long dsm_mask;
1664 const guid_t *guid; 1671 const guid_t *guid;
1665 int i; 1672 int i;
@@ -1732,25 +1739,15 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1732 1ULL << i)) 1739 1ULL << i))
1733 set_bit(i, &nfit_mem->dsm_mask); 1740 set_bit(i, &nfit_mem->dsm_mask);
1734 1741
1735 obj = acpi_label_info(adev_dimm->handle); 1742 if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1736 if (obj) { 1743 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1737 ACPI_FREE(obj);
1738 nfit_mem->has_lsi = 1;
1739 dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
1740 }
1741
1742 obj = acpi_label_read(adev_dimm->handle, 0, 0);
1743 if (obj) {
1744 ACPI_FREE(obj);
1745 nfit_mem->has_lsr = 1;
1746 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1744 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1745 nfit_mem->has_lsr = true;
1747 } 1746 }
1748 1747
1749 obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL); 1748 if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1750 if (obj) {
1751 ACPI_FREE(obj);
1752 nfit_mem->has_lsw = 1;
1753 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); 1749 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1750 nfit_mem->has_lsw = true;
1754 } 1751 }
1755 1752
1756 return 0; 1753 return 0;
@@ -1839,10 +1836,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1839 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 1836 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
1840 } 1837 }
1841 1838
1842 if (nfit_mem->has_lsi) 1839 if (nfit_mem->has_lsr) {
1843 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 1840 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
1844 if (nfit_mem->has_lsr)
1845 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); 1841 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
1842 }
1846 if (nfit_mem->has_lsw) 1843 if (nfit_mem->has_lsw)
1847 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); 1844 set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
1848 1845
@@ -2338,7 +2335,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2338 nvdimm = nd_blk_region_to_dimm(ndbr); 2335 nvdimm = nd_blk_region_to_dimm(ndbr);
2339 nfit_mem = nvdimm_provider_data(nvdimm); 2336 nfit_mem = nvdimm_provider_data(nvdimm);
2340 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 2337 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2341 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 2338 dev_dbg(dev, "missing%s%s%s\n",
2342 nfit_mem ? "" : " nfit_mem", 2339 nfit_mem ? "" : " nfit_mem",
2343 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 2340 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2344 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 2341 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
@@ -2357,7 +2354,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2357 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, 2354 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2358 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); 2355 nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2359 if (!mmio->addr.base) { 2356 if (!mmio->addr.base) {
2360 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 2357 dev_dbg(dev, "%s failed to map bdw\n",
2361 nvdimm_name(nvdimm)); 2358 nvdimm_name(nvdimm));
2362 return -ENOMEM; 2359 return -ENOMEM;
2363 } 2360 }
@@ -2368,8 +2365,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2368 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 2365 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2369 nfit_mem->memdev_bdw->interleave_ways); 2366 nfit_mem->memdev_bdw->interleave_ways);
2370 if (rc) { 2367 if (rc) {
2371 dev_dbg(dev, "%s: %s failed to init bdw interleave\n", 2368 dev_dbg(dev, "%s failed to init bdw interleave\n",
2372 __func__, nvdimm_name(nvdimm)); 2369 nvdimm_name(nvdimm));
2373 return rc; 2370 return rc;
2374 } 2371 }
2375 2372
@@ -2380,7 +2377,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2380 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, 2377 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2381 nfit_mem->spa_dcr->length); 2378 nfit_mem->spa_dcr->length);
2382 if (!mmio->addr.base) { 2379 if (!mmio->addr.base) {
2383 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 2380 dev_dbg(dev, "%s failed to map dcr\n",
2384 nvdimm_name(nvdimm)); 2381 nvdimm_name(nvdimm));
2385 return -ENOMEM; 2382 return -ENOMEM;
2386 } 2383 }
@@ -2391,15 +2388,15 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2391 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 2388 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2392 nfit_mem->memdev_dcr->interleave_ways); 2389 nfit_mem->memdev_dcr->interleave_ways);
2393 if (rc) { 2390 if (rc) {
2394 dev_dbg(dev, "%s: %s failed to init dcr interleave\n", 2391 dev_dbg(dev, "%s failed to init dcr interleave\n",
2395 __func__, nvdimm_name(nvdimm)); 2392 nvdimm_name(nvdimm));
2396 return rc; 2393 return rc;
2397 } 2394 }
2398 2395
2399 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 2396 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2400 if (rc < 0) { 2397 if (rc < 0) {
2401 dev_dbg(dev, "%s: %s failed get DIMM flags\n", 2398 dev_dbg(dev, "%s failed get DIMM flags\n",
2402 __func__, nvdimm_name(nvdimm)); 2399 nvdimm_name(nvdimm));
2403 return rc; 2400 return rc;
2404 } 2401 }
2405 2402
@@ -2449,7 +2446,8 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa
2449 memset(&ars_start, 0, sizeof(ars_start)); 2446 memset(&ars_start, 0, sizeof(ars_start));
2450 ars_start.address = spa->address; 2447 ars_start.address = spa->address;
2451 ars_start.length = spa->length; 2448 ars_start.length = spa->length;
2452 ars_start.flags = acpi_desc->ars_start_flags; 2449 if (test_bit(ARS_SHORT, &nfit_spa->ars_state))
2450 ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2453 if (nfit_spa_type(spa) == NFIT_SPA_PM) 2451 if (nfit_spa_type(spa) == NFIT_SPA_PM)
2454 ars_start.type = ND_ARS_PERSISTENT; 2452 ars_start.type = ND_ARS_PERSISTENT;
2455 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) 2453 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
@@ -2491,16 +2489,62 @@ static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2491 int rc, cmd_rc; 2489 int rc, cmd_rc;
2492 2490
2493 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, 2491 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2494 acpi_desc->ars_status_size, &cmd_rc); 2492 acpi_desc->max_ars, &cmd_rc);
2495 if (rc < 0) 2493 if (rc < 0)
2496 return rc; 2494 return rc;
2497 return cmd_rc; 2495 return cmd_rc;
2498} 2496}
2499 2497
2500static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc, 2498static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2501 struct nd_cmd_ars_status *ars_status) 2499 struct nfit_spa *nfit_spa)
2500{
2501 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2502 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2503 struct nd_region *nd_region = nfit_spa->nd_region;
2504 struct device *dev;
2505
2506 if ((ars_status->address >= spa->address && ars_status->address
2507 < spa->address + spa->length)
2508 || (ars_status->address < spa->address)) {
2509 /*
2510 * Assume that if a scrub starts at an offset from the
2511 * start of nfit_spa that we are in the continuation
2512 * case.
2513 *
2514 * Otherwise, if the scrub covers the spa range, mark
2515 * any pending request complete.
2516 */
2517 if (ars_status->address + ars_status->length
2518 >= spa->address + spa->length)
2519 /* complete */;
2520 else
2521 return;
2522 } else
2523 return;
2524
2525 if (test_bit(ARS_DONE, &nfit_spa->ars_state))
2526 return;
2527
2528 if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state))
2529 return;
2530
2531 if (nd_region) {
2532 dev = nd_region_dev(nd_region);
2533 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
2534 } else
2535 dev = acpi_desc->dev;
2536
2537 dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index,
2538 test_bit(ARS_SHORT, &nfit_spa->ars_state)
2539 ? "short" : "long");
2540 clear_bit(ARS_SHORT, &nfit_spa->ars_state);
2541 set_bit(ARS_DONE, &nfit_spa->ars_state);
2542}
2543
2544static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2502{ 2545{
2503 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; 2546 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2547 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2504 int rc; 2548 int rc;
2505 u32 i; 2549 u32 i;
2506 2550
@@ -2579,7 +2623,7 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2579 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2623 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2580 struct nd_blk_region_desc *ndbr_desc; 2624 struct nd_blk_region_desc *ndbr_desc;
2581 struct nfit_mem *nfit_mem; 2625 struct nfit_mem *nfit_mem;
2582 int blk_valid = 0, rc; 2626 int rc;
2583 2627
2584 if (!nvdimm) { 2628 if (!nvdimm) {
2585 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 2629 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
@@ -2599,15 +2643,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2599 if (!nfit_mem || !nfit_mem->bdw) { 2643 if (!nfit_mem || !nfit_mem->bdw) {
2600 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 2644 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2601 spa->range_index, nvdimm_name(nvdimm)); 2645 spa->range_index, nvdimm_name(nvdimm));
2602 } else { 2646 break;
2603 mapping->size = nfit_mem->bdw->capacity;
2604 mapping->start = nfit_mem->bdw->start_address;
2605 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2606 blk_valid = 1;
2607 } 2647 }
2608 2648
2649 mapping->size = nfit_mem->bdw->capacity;
2650 mapping->start = nfit_mem->bdw->start_address;
2651 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2609 ndr_desc->mapping = mapping; 2652 ndr_desc->mapping = mapping;
2610 ndr_desc->num_mappings = blk_valid; 2653 ndr_desc->num_mappings = 1;
2611 ndbr_desc = to_blk_region_desc(ndr_desc); 2654 ndbr_desc = to_blk_region_desc(ndr_desc);
2612 ndbr_desc->enable = acpi_nfit_blk_region_enable; 2655 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2613 ndbr_desc->do_io = acpi_desc->blk_do_io; 2656 ndbr_desc->do_io = acpi_desc->blk_do_io;
@@ -2655,8 +2698,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2655 return 0; 2698 return 0;
2656 2699
2657 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { 2700 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2658 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", 2701 dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2659 __func__);
2660 return 0; 2702 return 0;
2661 } 2703 }
2662 2704
@@ -2742,301 +2784,243 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2742 return rc; 2784 return rc;
2743} 2785}
2744 2786
2745static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, 2787static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
2746 u32 max_ars)
2747{ 2788{
2748 struct device *dev = acpi_desc->dev; 2789 struct device *dev = acpi_desc->dev;
2749 struct nd_cmd_ars_status *ars_status; 2790 struct nd_cmd_ars_status *ars_status;
2750 2791
2751 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { 2792 if (acpi_desc->ars_status) {
2752 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); 2793 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
2753 return 0; 2794 return 0;
2754 } 2795 }
2755 2796
2756 if (acpi_desc->ars_status) 2797 ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
2757 devm_kfree(dev, acpi_desc->ars_status);
2758 acpi_desc->ars_status = NULL;
2759 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2760 if (!ars_status) 2798 if (!ars_status)
2761 return -ENOMEM; 2799 return -ENOMEM;
2762 acpi_desc->ars_status = ars_status; 2800 acpi_desc->ars_status = ars_status;
2763 acpi_desc->ars_status_size = max_ars;
2764 return 0; 2801 return 0;
2765} 2802}
2766 2803
2767static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, 2804static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
2768 struct nfit_spa *nfit_spa)
2769{ 2805{
2770 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2771 int rc; 2806 int rc;
2772 2807
2773 if (!nfit_spa->max_ars) { 2808 if (ars_status_alloc(acpi_desc))
2774 struct nd_cmd_ars_cap ars_cap;
2775
2776 memset(&ars_cap, 0, sizeof(ars_cap));
2777 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2778 if (rc < 0)
2779 return rc;
2780 nfit_spa->max_ars = ars_cap.max_ars_out;
2781 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2782 /* check that the supported scrub types match the spa type */
2783 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2784 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2785 return -ENOTTY;
2786 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2787 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2788 return -ENOTTY;
2789 }
2790
2791 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2792 return -ENOMEM; 2809 return -ENOMEM;
2793 2810
2794 rc = ars_get_status(acpi_desc); 2811 rc = ars_get_status(acpi_desc);
2812
2795 if (rc < 0 && rc != -ENOSPC) 2813 if (rc < 0 && rc != -ENOSPC)
2796 return rc; 2814 return rc;
2797 2815
2798 if (ars_status_process_records(acpi_desc, acpi_desc->ars_status)) 2816 if (ars_status_process_records(acpi_desc))
2799 return -ENOMEM; 2817 return -ENOMEM;
2800 2818
2801 return 0; 2819 return 0;
2802} 2820}
2803 2821
2804static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, 2822static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa,
2805 struct nfit_spa *nfit_spa) 2823 int *query_rc)
2806{ 2824{
2807 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2825 int rc = *query_rc;
2808 unsigned int overflow_retry = scrub_overflow_abort;
2809 u64 init_ars_start = 0, init_ars_len = 0;
2810 struct device *dev = acpi_desc->dev;
2811 unsigned int tmo = scrub_timeout;
2812 int rc;
2813 2826
2814 if (!nfit_spa->ars_required || !nfit_spa->nd_region) 2827 if (no_init_ars)
2815 return; 2828 return acpi_nfit_register_region(acpi_desc, nfit_spa);
2816 2829
2817 rc = ars_start(acpi_desc, nfit_spa); 2830 set_bit(ARS_REQ, &nfit_spa->ars_state);
2818 /* 2831 set_bit(ARS_SHORT, &nfit_spa->ars_state);
2819 * If we timed out the initial scan we'll still be busy here,
2820 * and will wait another timeout before giving up permanently.
2821 */
2822 if (rc < 0 && rc != -EBUSY)
2823 return;
2824
2825 do {
2826 u64 ars_start, ars_len;
2827
2828 if (acpi_desc->cancel)
2829 break;
2830 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2831 if (rc == -ENOTTY)
2832 break;
2833 if (rc == -EBUSY && !tmo) {
2834 dev_warn(dev, "range %d ars timeout, aborting\n",
2835 spa->range_index);
2836 break;
2837 }
2838 2832
2833 switch (rc) {
2834 case 0:
2835 case -EAGAIN:
2836 rc = ars_start(acpi_desc, nfit_spa);
2839 if (rc == -EBUSY) { 2837 if (rc == -EBUSY) {
2840 /* 2838 *query_rc = rc;
2841 * Note, entries may be appended to the list
2842 * while the lock is dropped, but the workqueue
2843 * being active prevents entries being deleted /
2844 * freed.
2845 */
2846 mutex_unlock(&acpi_desc->init_mutex);
2847 ssleep(1);
2848 tmo--;
2849 mutex_lock(&acpi_desc->init_mutex);
2850 continue;
2851 }
2852
2853 /* we got some results, but there are more pending... */
2854 if (rc == -ENOSPC && overflow_retry--) {
2855 if (!init_ars_len) {
2856 init_ars_len = acpi_desc->ars_status->length;
2857 init_ars_start = acpi_desc->ars_status->address;
2858 }
2859 rc = ars_continue(acpi_desc);
2860 }
2861
2862 if (rc < 0) {
2863 dev_warn(dev, "range %d ars continuation failed\n",
2864 spa->range_index);
2865 break; 2839 break;
2866 } 2840 } else if (rc == 0) {
2867 2841 rc = acpi_nfit_query_poison(acpi_desc);
2868 if (init_ars_len) {
2869 ars_start = init_ars_start;
2870 ars_len = init_ars_len;
2871 } else { 2842 } else {
2872 ars_start = acpi_desc->ars_status->address; 2843 set_bit(ARS_FAILED, &nfit_spa->ars_state);
2873 ars_len = acpi_desc->ars_status->length; 2844 break;
2874 } 2845 }
2875 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", 2846 if (rc == -EAGAIN)
2876 spa->range_index, ars_start, ars_len); 2847 clear_bit(ARS_SHORT, &nfit_spa->ars_state);
2877 /* notify the region about new poison entries */ 2848 else if (rc == 0)
2878 nvdimm_region_notify(nfit_spa->nd_region, 2849 ars_complete(acpi_desc, nfit_spa);
2879 NVDIMM_REVALIDATE_POISON);
2880 break; 2850 break;
2881 } while (1); 2851 case -EBUSY:
2852 case -ENOSPC:
2853 break;
2854 default:
2855 set_bit(ARS_FAILED, &nfit_spa->ars_state);
2856 break;
2857 }
2858
2859 if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state))
2860 set_bit(ARS_REQ, &nfit_spa->ars_state);
2861
2862 return acpi_nfit_register_region(acpi_desc, nfit_spa);
2882} 2863}
2883 2864
2884static void acpi_nfit_scrub(struct work_struct *work) 2865static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
2885{ 2866{
2886 struct device *dev;
2887 u64 init_scrub_length = 0;
2888 struct nfit_spa *nfit_spa; 2867 struct nfit_spa *nfit_spa;
2889 u64 init_scrub_address = 0;
2890 bool init_ars_done = false;
2891 struct acpi_nfit_desc *acpi_desc;
2892 unsigned int tmo = scrub_timeout;
2893 unsigned int overflow_retry = scrub_overflow_abort;
2894
2895 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2896 dev = acpi_desc->dev;
2897
2898 /*
2899 * We scrub in 2 phases. The first phase waits for any platform
2900 * firmware initiated scrubs to complete and then we go search for the
2901 * affected spa regions to mark them scanned. In the second phase we
2902 * initiate a directed scrub for every range that was not scrubbed in
2903 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2904 * the first phase, but really only care about running phase 2, where
2905 * regions can be notified of new poison.
2906 */
2907 2868
2908 /* process platform firmware initiated scrubs */
2909 retry:
2910 mutex_lock(&acpi_desc->init_mutex);
2911 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2869 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2912 struct nd_cmd_ars_status *ars_status; 2870 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
2913 struct acpi_nfit_system_address *spa;
2914 u64 ars_start, ars_len;
2915 int rc;
2916
2917 if (acpi_desc->cancel)
2918 break;
2919
2920 if (nfit_spa->nd_region)
2921 continue; 2871 continue;
2872 ars_complete(acpi_desc, nfit_spa);
2873 }
2874}
2922 2875
2923 if (init_ars_done) { 2876static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
2924 /* 2877 int query_rc)
2925 * No need to re-query, we're now just 2878{
2926 * reconciling all the ranges covered by the 2879 unsigned int tmo = acpi_desc->scrub_tmo;
2927 * initial scrub 2880 struct device *dev = acpi_desc->dev;
2928 */ 2881 struct nfit_spa *nfit_spa;
2929 rc = 0;
2930 } else
2931 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2932
2933 if (rc == -ENOTTY) {
2934 /* no ars capability, just register spa and move on */
2935 acpi_nfit_register_region(acpi_desc, nfit_spa);
2936 continue;
2937 }
2938
2939 if (rc == -EBUSY && !tmo) {
2940 /* fallthrough to directed scrub in phase 2 */
2941 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2942 break;
2943 } else if (rc == -EBUSY) {
2944 mutex_unlock(&acpi_desc->init_mutex);
2945 ssleep(1);
2946 tmo--;
2947 goto retry;
2948 }
2949
2950 /* we got some results, but there are more pending... */
2951 if (rc == -ENOSPC && overflow_retry--) {
2952 ars_status = acpi_desc->ars_status;
2953 /*
2954 * Record the original scrub range, so that we
2955 * can recall all the ranges impacted by the
2956 * initial scrub.
2957 */
2958 if (!init_scrub_length) {
2959 init_scrub_length = ars_status->length;
2960 init_scrub_address = ars_status->address;
2961 }
2962 rc = ars_continue(acpi_desc);
2963 if (rc == 0) {
2964 mutex_unlock(&acpi_desc->init_mutex);
2965 goto retry;
2966 }
2967 }
2968 2882
2969 if (rc < 0) { 2883 if (acpi_desc->cancel)
2970 /* 2884 return 0;
2971 * Initial scrub failed, we'll give it one more
2972 * try below...
2973 */
2974 break;
2975 }
2976 2885
2977 /* We got some final results, record completed ranges */ 2886 if (query_rc == -EBUSY) {
2978 ars_status = acpi_desc->ars_status; 2887 dev_dbg(dev, "ARS: ARS busy\n");
2979 if (init_scrub_length) { 2888 return min(30U * 60U, tmo * 2);
2980 ars_start = init_scrub_address; 2889 }
2981 ars_len = ars_start + init_scrub_length; 2890 if (query_rc == -ENOSPC) {
2982 } else { 2891 dev_dbg(dev, "ARS: ARS continue\n");
2983 ars_start = ars_status->address; 2892 ars_continue(acpi_desc);
2984 ars_len = ars_status->length; 2893 return 1;
2985 } 2894 }
2986 spa = nfit_spa->spa; 2895 if (query_rc && query_rc != -EAGAIN) {
2896 unsigned long long addr, end;
2987 2897
2988 if (!init_ars_done) { 2898 addr = acpi_desc->ars_status->address;
2989 init_ars_done = true; 2899 end = addr + acpi_desc->ars_status->length;
2990 dev_dbg(dev, "init scrub %#llx + %#llx complete\n", 2900 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
2991 ars_start, ars_len); 2901 query_rc);
2992 }
2993 if (ars_start <= spa->address && ars_start + ars_len
2994 >= spa->address + spa->length)
2995 acpi_nfit_register_region(acpi_desc, nfit_spa);
2996 } 2902 }
2997 2903
2998 /* 2904 ars_complete_all(acpi_desc);
2999 * For all the ranges not covered by an initial scrub we still
3000 * want to see if there are errors, but it's ok to discover them
3001 * asynchronously.
3002 */
3003 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2905 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3004 /* 2906 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3005 * Flag all the ranges that still need scrubbing, but 2907 continue;
3006 * register them now to make data available. 2908 if (test_bit(ARS_REQ, &nfit_spa->ars_state)) {
3007 */ 2909 int rc = ars_start(acpi_desc, nfit_spa);
3008 if (!nfit_spa->nd_region) { 2910
3009 nfit_spa->ars_required = 1; 2911 clear_bit(ARS_DONE, &nfit_spa->ars_state);
3010 acpi_nfit_register_region(acpi_desc, nfit_spa); 2912 dev = nd_region_dev(nfit_spa->nd_region);
2913 dev_dbg(dev, "ARS: range %d ARS start (%d)\n",
2914 nfit_spa->spa->range_index, rc);
2915 if (rc == 0 || rc == -EBUSY)
2916 return 1;
2917 dev_err(dev, "ARS: range %d ARS failed (%d)\n",
2918 nfit_spa->spa->range_index, rc);
2919 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3011 } 2920 }
3012 } 2921 }
3013 acpi_desc->init_complete = 1; 2922 return 0;
2923}
3014 2924
3015 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2925static void acpi_nfit_scrub(struct work_struct *work)
3016 acpi_nfit_async_scrub(acpi_desc, nfit_spa); 2926{
3017 acpi_desc->scrub_count++; 2927 struct acpi_nfit_desc *acpi_desc;
3018 acpi_desc->ars_start_flags = 0; 2928 unsigned int tmo;
3019 if (acpi_desc->scrub_count_state) 2929 int query_rc;
3020 sysfs_notify_dirent(acpi_desc->scrub_count_state); 2930
2931 acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
2932 mutex_lock(&acpi_desc->init_mutex);
2933 query_rc = acpi_nfit_query_poison(acpi_desc);
2934 tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
2935 if (tmo) {
2936 queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
2937 acpi_desc->scrub_tmo = tmo;
2938 } else {
2939 acpi_desc->scrub_count++;
2940 if (acpi_desc->scrub_count_state)
2941 sysfs_notify_dirent(acpi_desc->scrub_count_state);
2942 }
2943 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3021 mutex_unlock(&acpi_desc->init_mutex); 2944 mutex_unlock(&acpi_desc->init_mutex);
3022} 2945}
3023 2946
2947static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
2948 struct nfit_spa *nfit_spa)
2949{
2950 int type = nfit_spa_type(nfit_spa->spa);
2951 struct nd_cmd_ars_cap ars_cap;
2952 int rc;
2953
2954 memset(&ars_cap, 0, sizeof(ars_cap));
2955 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2956 if (rc < 0)
2957 return;
2958 /* check that the supported scrub types match the spa type */
2959 if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
2960 & ND_ARS_VOLATILE) == 0)
2961 return;
2962 if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
2963 & ND_ARS_PERSISTENT) == 0)
2964 return;
2965
2966 nfit_spa->max_ars = ars_cap.max_ars_out;
2967 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2968 acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
2969 clear_bit(ARS_FAILED, &nfit_spa->ars_state);
2970 set_bit(ARS_REQ, &nfit_spa->ars_state);
2971}
2972
3024static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 2973static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3025{ 2974{
3026 struct nfit_spa *nfit_spa; 2975 struct nfit_spa *nfit_spa;
3027 int rc; 2976 int rc, query_rc;
2977
2978 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2979 set_bit(ARS_FAILED, &nfit_spa->ars_state);
2980 switch (nfit_spa_type(nfit_spa->spa)) {
2981 case NFIT_SPA_VOLATILE:
2982 case NFIT_SPA_PM:
2983 acpi_nfit_init_ars(acpi_desc, nfit_spa);
2984 break;
2985 }
2986 }
2987
2988 /*
2989 * Reap any results that might be pending before starting new
2990 * short requests.
2991 */
2992 query_rc = acpi_nfit_query_poison(acpi_desc);
2993 if (query_rc == 0)
2994 ars_complete_all(acpi_desc);
3028 2995
3029 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2996 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
3030 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { 2997 switch (nfit_spa_type(nfit_spa->spa)) {
3031 /* BLK regions don't need to wait for ars results */ 2998 case NFIT_SPA_VOLATILE:
2999 case NFIT_SPA_PM:
3000 /* register regions and kick off initial ARS run */
3001 rc = ars_register(acpi_desc, nfit_spa, &query_rc);
3002 if (rc)
3003 return rc;
3004 break;
3005 case NFIT_SPA_BDW:
3006 /* nothing to register */
3007 break;
3008 case NFIT_SPA_DCR:
3009 case NFIT_SPA_VDISK:
3010 case NFIT_SPA_VCD:
3011 case NFIT_SPA_PDISK:
3012 case NFIT_SPA_PCD:
3013 /* register known regions that don't support ARS */
3032 rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 3014 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3033 if (rc) 3015 if (rc)
3034 return rc; 3016 return rc;
3017 break;
3018 default:
3019 /* don't register unknown regions */
3020 break;
3035 } 3021 }
3036 3022
3037 acpi_desc->ars_start_flags = 0; 3023 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
3038 if (!acpi_desc->cancel)
3039 queue_work(nfit_wq, &acpi_desc->work);
3040 return 0; 3024 return 0;
3041} 3025}
3042 3026
@@ -3146,8 +3130,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3146 data = add_table(acpi_desc, &prev, data, end); 3130 data = add_table(acpi_desc, &prev, data, end);
3147 3131
3148 if (IS_ERR(data)) { 3132 if (IS_ERR(data)) {
3149 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, 3133 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
3150 PTR_ERR(data));
3151 rc = PTR_ERR(data); 3134 rc = PTR_ERR(data);
3152 goto out_unlock; 3135 goto out_unlock;
3153 } 3136 }
@@ -3172,49 +3155,20 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3172} 3155}
3173EXPORT_SYMBOL_GPL(acpi_nfit_init); 3156EXPORT_SYMBOL_GPL(acpi_nfit_init);
3174 3157
3175struct acpi_nfit_flush_work {
3176 struct work_struct work;
3177 struct completion cmp;
3178};
3179
3180static void flush_probe(struct work_struct *work)
3181{
3182 struct acpi_nfit_flush_work *flush;
3183
3184 flush = container_of(work, typeof(*flush), work);
3185 complete(&flush->cmp);
3186}
3187
3188static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3158static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3189{ 3159{
3190 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3160 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
3191 struct device *dev = acpi_desc->dev; 3161 struct device *dev = acpi_desc->dev;
3192 struct acpi_nfit_flush_work flush;
3193 int rc;
3194 3162
3195 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3163 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3196 device_lock(dev); 3164 device_lock(dev);
3197 device_unlock(dev); 3165 device_unlock(dev);
3198 3166
3199 /* bounce the init_mutex to make init_complete valid */ 3167 /* Bounce the init_mutex to complete initial registration */
3200 mutex_lock(&acpi_desc->init_mutex); 3168 mutex_lock(&acpi_desc->init_mutex);
3201 if (acpi_desc->cancel || acpi_desc->init_complete) {
3202 mutex_unlock(&acpi_desc->init_mutex);
3203 return 0;
3204 }
3205
3206 /*
3207 * Scrub work could take 10s of seconds, userspace may give up so we
3208 * need to be interruptible while waiting.
3209 */
3210 INIT_WORK_ONSTACK(&flush.work, flush_probe);
3211 init_completion(&flush.cmp);
3212 queue_work(nfit_wq, &flush.work);
3213 mutex_unlock(&acpi_desc->init_mutex); 3169 mutex_unlock(&acpi_desc->init_mutex);
3214 3170
3215 rc = wait_for_completion_interruptible(&flush.cmp); 3171 return 0;
3216 cancel_work_sync(&flush.work);
3217 return rc;
3218} 3172}
3219 3173
3220static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3174static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
@@ -3233,20 +3187,18 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3233 * just needs guarantees that any ars it initiates are not 3187 * just needs guarantees that any ars it initiates are not
3234 * interrupted by any intervening start reqeusts from userspace. 3188 * interrupted by any intervening start reqeusts from userspace.
3235 */ 3189 */
3236 if (work_busy(&acpi_desc->work)) 3190 if (work_busy(&acpi_desc->dwork.work))
3237 return -EBUSY; 3191 return -EBUSY;
3238 3192
3239 return 0; 3193 return 0;
3240} 3194}
3241 3195
3242int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags) 3196int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
3243{ 3197{
3244 struct device *dev = acpi_desc->dev; 3198 struct device *dev = acpi_desc->dev;
3199 int scheduled = 0, busy = 0;
3245 struct nfit_spa *nfit_spa; 3200 struct nfit_spa *nfit_spa;
3246 3201
3247 if (work_busy(&acpi_desc->work))
3248 return -EBUSY;
3249
3250 mutex_lock(&acpi_desc->init_mutex); 3202 mutex_lock(&acpi_desc->init_mutex);
3251 if (acpi_desc->cancel) { 3203 if (acpi_desc->cancel) {
3252 mutex_unlock(&acpi_desc->init_mutex); 3204 mutex_unlock(&acpi_desc->init_mutex);
@@ -3254,19 +3206,32 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags)
3254 } 3206 }
3255 3207
3256 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3208 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3257 struct acpi_nfit_system_address *spa = nfit_spa->spa; 3209 int type = nfit_spa_type(nfit_spa->spa);
3258 3210
3259 if (nfit_spa_type(spa) != NFIT_SPA_PM) 3211 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3212 continue;
3213 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3260 continue; 3214 continue;
3261 3215
3262 nfit_spa->ars_required = 1; 3216 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state))
3217 busy++;
3218 else {
3219 if (test_bit(ARS_SHORT, &flags))
3220 set_bit(ARS_SHORT, &nfit_spa->ars_state);
3221 scheduled++;
3222 }
3223 }
3224 if (scheduled) {
3225 queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
3226 dev_dbg(dev, "ars_scan triggered\n");
3263 } 3227 }
3264 acpi_desc->ars_start_flags = flags;
3265 queue_work(nfit_wq, &acpi_desc->work);
3266 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
3267 mutex_unlock(&acpi_desc->init_mutex); 3228 mutex_unlock(&acpi_desc->init_mutex);
3268 3229
3269 return 0; 3230 if (scheduled)
3231 return 0;
3232 if (busy)
3233 return -EBUSY;
3234 return -ENOTTY;
3270} 3235}
3271 3236
3272void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 3237void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
@@ -3293,7 +3258,8 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3293 INIT_LIST_HEAD(&acpi_desc->dimms); 3258 INIT_LIST_HEAD(&acpi_desc->dimms);
3294 INIT_LIST_HEAD(&acpi_desc->list); 3259 INIT_LIST_HEAD(&acpi_desc->list);
3295 mutex_init(&acpi_desc->init_mutex); 3260 mutex_init(&acpi_desc->init_mutex);
3296 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); 3261 acpi_desc->scrub_tmo = 1;
3262 INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3297} 3263}
3298EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); 3264EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
3299 3265
@@ -3317,6 +3283,7 @@ void acpi_nfit_shutdown(void *data)
3317 3283
3318 mutex_lock(&acpi_desc->init_mutex); 3284 mutex_lock(&acpi_desc->init_mutex);
3319 acpi_desc->cancel = 1; 3285 acpi_desc->cancel = 1;
3286 cancel_delayed_work_sync(&acpi_desc->dwork);
3320 mutex_unlock(&acpi_desc->init_mutex); 3287 mutex_unlock(&acpi_desc->init_mutex);
3321 3288
3322 /* 3289 /*
@@ -3370,8 +3337,8 @@ static int acpi_nfit_add(struct acpi_device *adev)
3370 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, 3337 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3371 obj->buffer.length); 3338 obj->buffer.length);
3372 else 3339 else
3373 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", 3340 dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
3374 __func__, (int) obj->type); 3341 (int) obj->type);
3375 kfree(buf.pointer); 3342 kfree(buf.pointer);
3376 } else 3343 } else
3377 /* skip over the lead-in header table */ 3344 /* skip over the lead-in header table */
@@ -3400,7 +3367,7 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3400 3367
3401 if (!dev->driver) { 3368 if (!dev->driver) {
3402 /* dev->driver may be null if we're being removed */ 3369 /* dev->driver may be null if we're being removed */
3403 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 3370 dev_dbg(dev, "no driver found for dev\n");
3404 return; 3371 return;
3405 } 3372 }
3406 3373
@@ -3438,15 +3405,15 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3438static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) 3405static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
3439{ 3406{
3440 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); 3407 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3441 u8 flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? 3408 unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
3442 0 : ND_ARS_RETURN_PREV_DATA; 3409 0 : 1 << ARS_SHORT;
3443 3410
3444 acpi_nfit_ars_rescan(acpi_desc, flags); 3411 acpi_nfit_ars_rescan(acpi_desc, flags);
3445} 3412}
3446 3413
3447void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) 3414void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
3448{ 3415{
3449 dev_dbg(dev, "%s: event: 0x%x\n", __func__, event); 3416 dev_dbg(dev, "event: 0x%x\n", event);
3450 3417
3451 switch (event) { 3418 switch (event) {
3452 case NFIT_NOTIFY_UPDATE: 3419 case NFIT_NOTIFY_UPDATE:
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index b92921439657..e9626bf6ca29 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -51,9 +51,8 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
51 if ((spa->address + spa->length - 1) < mce->addr) 51 if ((spa->address + spa->length - 1) < mce->addr)
52 continue; 52 continue;
53 found_match = 1; 53 found_match = 1;
54 dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n", 54 dev_dbg(dev, "addr in SPA %d (0x%llx, 0x%llx)\n",
55 __func__, spa->range_index, spa->address, 55 spa->range_index, spa->address, spa->length);
56 spa->length);
57 /* 56 /*
58 * We can break at the first match because we're going 57 * We can break at the first match because we're going
59 * to rescan all the SPA ranges. There shouldn't be any 58 * to rescan all the SPA ranges. There shouldn't be any
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 50d36e166d70..7d15856a739f 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -117,10 +117,17 @@ enum nfit_dimm_notifiers {
117 NFIT_NOTIFY_DIMM_HEALTH = 0x81, 117 NFIT_NOTIFY_DIMM_HEALTH = 0x81,
118}; 118};
119 119
120enum nfit_ars_state {
121 ARS_REQ,
122 ARS_DONE,
123 ARS_SHORT,
124 ARS_FAILED,
125};
126
120struct nfit_spa { 127struct nfit_spa {
121 struct list_head list; 128 struct list_head list;
122 struct nd_region *nd_region; 129 struct nd_region *nd_region;
123 unsigned int ars_required:1; 130 unsigned long ars_state;
124 u32 clear_err_unit; 131 u32 clear_err_unit;
125 u32 max_ars; 132 u32 max_ars;
126 struct acpi_nfit_system_address spa[0]; 133 struct acpi_nfit_system_address spa[0];
@@ -171,9 +178,8 @@ struct nfit_mem {
171 struct resource *flush_wpq; 178 struct resource *flush_wpq;
172 unsigned long dsm_mask; 179 unsigned long dsm_mask;
173 int family; 180 int family;
174 u32 has_lsi:1; 181 bool has_lsr;
175 u32 has_lsr:1; 182 bool has_lsw;
176 u32 has_lsw:1;
177}; 183};
178 184
179struct acpi_nfit_desc { 185struct acpi_nfit_desc {
@@ -191,18 +197,18 @@ struct acpi_nfit_desc {
191 struct device *dev; 197 struct device *dev;
192 u8 ars_start_flags; 198 u8 ars_start_flags;
193 struct nd_cmd_ars_status *ars_status; 199 struct nd_cmd_ars_status *ars_status;
194 size_t ars_status_size; 200 struct delayed_work dwork;
195 struct work_struct work;
196 struct list_head list; 201 struct list_head list;
197 struct kernfs_node *scrub_count_state; 202 struct kernfs_node *scrub_count_state;
203 unsigned int max_ars;
198 unsigned int scrub_count; 204 unsigned int scrub_count;
199 unsigned int scrub_mode; 205 unsigned int scrub_mode;
200 unsigned int cancel:1; 206 unsigned int cancel:1;
201 unsigned int init_complete:1;
202 unsigned long dimm_cmd_force_en; 207 unsigned long dimm_cmd_force_en;
203 unsigned long bus_cmd_force_en; 208 unsigned long bus_cmd_force_en;
204 unsigned long bus_nfit_cmd_force_en; 209 unsigned long bus_nfit_cmd_force_en;
205 unsigned int platform_cap; 210 unsigned int platform_cap;
211 unsigned int scrub_tmo;
206 int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 212 int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
207 void *iobuf, u64 len, int rw); 213 void *iobuf, u64 len, int rw);
208}; 214};
@@ -244,7 +250,7 @@ struct nfit_blk {
244 250
245extern struct list_head acpi_descs; 251extern struct list_head acpi_descs;
246extern struct mutex acpi_desc_lock; 252extern struct mutex acpi_desc_lock;
247int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags); 253int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
248 254
249#ifdef CONFIG_X86_MCE 255#ifdef CONFIG_X86_MCE
250void nfit_mce_register(void); 256void nfit_mce_register(void);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 2137dbc29877..37be5a306c8f 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -257,8 +257,8 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
257 257
258 dax_region = dev_dax->region; 258 dax_region = dev_dax->region;
259 if (dax_region->align > PAGE_SIZE) { 259 if (dax_region->align > PAGE_SIZE) {
260 dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n", 260 dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
261 __func__, dax_region->align, fault_size); 261 dax_region->align, fault_size);
262 return VM_FAULT_SIGBUS; 262 return VM_FAULT_SIGBUS;
263 } 263 }
264 264
@@ -267,8 +267,7 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
267 267
268 phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE); 268 phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
269 if (phys == -1) { 269 if (phys == -1) {
270 dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, 270 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
271 vmf->pgoff);
272 return VM_FAULT_SIGBUS; 271 return VM_FAULT_SIGBUS;
273 } 272 }
274 273
@@ -299,14 +298,14 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
299 298
300 dax_region = dev_dax->region; 299 dax_region = dev_dax->region;
301 if (dax_region->align > PMD_SIZE) { 300 if (dax_region->align > PMD_SIZE) {
302 dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n", 301 dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
303 __func__, dax_region->align, fault_size); 302 dax_region->align, fault_size);
304 return VM_FAULT_SIGBUS; 303 return VM_FAULT_SIGBUS;
305 } 304 }
306 305
307 /* dax pmd mappings require pfn_t_devmap() */ 306 /* dax pmd mappings require pfn_t_devmap() */
308 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { 307 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
309 dev_dbg(dev, "%s: region lacks devmap flags\n", __func__); 308 dev_dbg(dev, "region lacks devmap flags\n");
310 return VM_FAULT_SIGBUS; 309 return VM_FAULT_SIGBUS;
311 } 310 }
312 311
@@ -323,8 +322,7 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
323 pgoff = linear_page_index(vmf->vma, pmd_addr); 322 pgoff = linear_page_index(vmf->vma, pmd_addr);
324 phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE); 323 phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
325 if (phys == -1) { 324 if (phys == -1) {
326 dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, 325 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
327 pgoff);
328 return VM_FAULT_SIGBUS; 326 return VM_FAULT_SIGBUS;
329 } 327 }
330 328
@@ -351,14 +349,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
351 349
352 dax_region = dev_dax->region; 350 dax_region = dev_dax->region;
353 if (dax_region->align > PUD_SIZE) { 351 if (dax_region->align > PUD_SIZE) {
354 dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n", 352 dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
355 __func__, dax_region->align, fault_size); 353 dax_region->align, fault_size);
356 return VM_FAULT_SIGBUS; 354 return VM_FAULT_SIGBUS;
357 } 355 }
358 356
359 /* dax pud mappings require pfn_t_devmap() */ 357 /* dax pud mappings require pfn_t_devmap() */
360 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { 358 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
361 dev_dbg(dev, "%s: region lacks devmap flags\n", __func__); 359 dev_dbg(dev, "region lacks devmap flags\n");
362 return VM_FAULT_SIGBUS; 360 return VM_FAULT_SIGBUS;
363 } 361 }
364 362
@@ -375,8 +373,7 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
375 pgoff = linear_page_index(vmf->vma, pud_addr); 373 pgoff = linear_page_index(vmf->vma, pud_addr);
376 phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE); 374 phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
377 if (phys == -1) { 375 if (phys == -1) {
378 dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, 376 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
379 pgoff);
380 return VM_FAULT_SIGBUS; 377 return VM_FAULT_SIGBUS;
381 } 378 }
382 379
@@ -399,9 +396,8 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
399 struct file *filp = vmf->vma->vm_file; 396 struct file *filp = vmf->vma->vm_file;
400 struct dev_dax *dev_dax = filp->private_data; 397 struct dev_dax *dev_dax = filp->private_data;
401 398
402 dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__, 399 dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
403 current->comm, (vmf->flags & FAULT_FLAG_WRITE) 400 (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
404 ? "write" : "read",
405 vmf->vma->vm_start, vmf->vma->vm_end, pe_size); 401 vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
406 402
407 id = dax_read_lock(); 403 id = dax_read_lock();
@@ -450,7 +446,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
450 struct dev_dax *dev_dax = filp->private_data; 446 struct dev_dax *dev_dax = filp->private_data;
451 int rc, id; 447 int rc, id;
452 448
453 dev_dbg(&dev_dax->dev, "%s\n", __func__); 449 dev_dbg(&dev_dax->dev, "trace\n");
454 450
455 /* 451 /*
456 * We lock to check dax_dev liveness and will re-check at 452 * We lock to check dax_dev liveness and will re-check at
@@ -508,7 +504,7 @@ static int dax_open(struct inode *inode, struct file *filp)
508 struct inode *__dax_inode = dax_inode(dax_dev); 504 struct inode *__dax_inode = dax_inode(dax_dev);
509 struct dev_dax *dev_dax = dax_get_private(dax_dev); 505 struct dev_dax *dev_dax = dax_get_private(dax_dev);
510 506
511 dev_dbg(&dev_dax->dev, "%s\n", __func__); 507 dev_dbg(&dev_dax->dev, "trace\n");
512 inode->i_mapping = __dax_inode->i_mapping; 508 inode->i_mapping = __dax_inode->i_mapping;
513 inode->i_mapping->host = __dax_inode; 509 inode->i_mapping->host = __dax_inode;
514 filp->f_mapping = inode->i_mapping; 510 filp->f_mapping = inode->i_mapping;
@@ -523,7 +519,7 @@ static int dax_release(struct inode *inode, struct file *filp)
523{ 519{
524 struct dev_dax *dev_dax = filp->private_data; 520 struct dev_dax *dev_dax = filp->private_data;
525 521
526 dev_dbg(&dev_dax->dev, "%s\n", __func__); 522 dev_dbg(&dev_dax->dev, "trace\n");
527 return 0; 523 return 0;
528} 524}
529 525
@@ -565,7 +561,7 @@ static void unregister_dev_dax(void *dev)
565 struct inode *inode = dax_inode(dax_dev); 561 struct inode *inode = dax_inode(dax_dev);
566 struct cdev *cdev = inode->i_cdev; 562 struct cdev *cdev = inode->i_cdev;
567 563
568 dev_dbg(dev, "%s\n", __func__); 564 dev_dbg(dev, "trace\n");
569 565
570 kill_dev_dax(dev_dax); 566 kill_dev_dax(dev_dax);
571 cdev_device_del(cdev, dev); 567 cdev_device_del(cdev, dev);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 31b6ecce4c64..fd49b24fd6af 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -34,7 +34,7 @@ static void dax_pmem_percpu_release(struct percpu_ref *ref)
34{ 34{
35 struct dax_pmem *dax_pmem = to_dax_pmem(ref); 35 struct dax_pmem *dax_pmem = to_dax_pmem(ref);
36 36
37 dev_dbg(dax_pmem->dev, "%s\n", __func__); 37 dev_dbg(dax_pmem->dev, "trace\n");
38 complete(&dax_pmem->cmp); 38 complete(&dax_pmem->cmp);
39} 39}
40 40
@@ -43,7 +43,7 @@ static void dax_pmem_percpu_exit(void *data)
43 struct percpu_ref *ref = data; 43 struct percpu_ref *ref = data;
44 struct dax_pmem *dax_pmem = to_dax_pmem(ref); 44 struct dax_pmem *dax_pmem = to_dax_pmem(ref);
45 45
46 dev_dbg(dax_pmem->dev, "%s\n", __func__); 46 dev_dbg(dax_pmem->dev, "trace\n");
47 wait_for_completion(&dax_pmem->cmp); 47 wait_for_completion(&dax_pmem->cmp);
48 percpu_ref_exit(ref); 48 percpu_ref_exit(ref);
49} 49}
@@ -53,7 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
53 struct percpu_ref *ref = data; 53 struct percpu_ref *ref = data;
54 struct dax_pmem *dax_pmem = to_dax_pmem(ref); 54 struct dax_pmem *dax_pmem = to_dax_pmem(ref);
55 55
56 dev_dbg(dax_pmem->dev, "%s\n", __func__); 56 dev_dbg(dax_pmem->dev, "trace\n");
57 percpu_ref_kill(ref); 57 percpu_ref_kill(ref);
58} 58}
59 59
@@ -150,17 +150,7 @@ static struct nd_device_driver dax_pmem_driver = {
150 .type = ND_DRIVER_DAX_PMEM, 150 .type = ND_DRIVER_DAX_PMEM,
151}; 151};
152 152
153static int __init dax_pmem_init(void) 153module_nd_driver(dax_pmem_driver);
154{
155 return nd_driver_register(&dax_pmem_driver);
156}
157module_init(dax_pmem_init);
158
159static void __exit dax_pmem_exit(void)
160{
161 driver_unregister(&dax_pmem_driver.drv);
162}
163module_exit(dax_pmem_exit);
164 154
165MODULE_LICENSE("GPL v2"); 155MODULE_LICENSE("GPL v2");
166MODULE_AUTHOR("Intel Corporation"); 156MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index a65f2e1d9f53..f6c533c4d09b 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -102,4 +102,15 @@ config NVDIMM_DAX
102 102
103 Select Y if unsure 103 Select Y if unsure
104 104
105config OF_PMEM
106 # FIXME: make tristate once OF_NUMA dependency removed
107 bool "Device-tree support for persistent memory regions"
108 depends on OF
109 default LIBNVDIMM
110 help
111 Allows regions of persistent memory to be described in the
112 device-tree.
113
114 Select Y if unsure.
115
105endif 116endif
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 70d5f3ad9909..e8847045dac0 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
4obj-$(CONFIG_ND_BTT) += nd_btt.o 4obj-$(CONFIG_ND_BTT) += nd_btt.o
5obj-$(CONFIG_ND_BLK) += nd_blk.o 5obj-$(CONFIG_ND_BLK) += nd_blk.o
6obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o 6obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
7obj-$(CONFIG_OF_PMEM) += of_pmem.o
7 8
8nd_pmem-y := pmem.o 9nd_pmem-y := pmem.o
9 10
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index d58925295aa7..795ad4ff35ca 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -26,7 +26,7 @@ static void nd_btt_release(struct device *dev)
26 struct nd_region *nd_region = to_nd_region(dev->parent); 26 struct nd_region *nd_region = to_nd_region(dev->parent);
27 struct nd_btt *nd_btt = to_nd_btt(dev); 27 struct nd_btt *nd_btt = to_nd_btt(dev);
28 28
29 dev_dbg(dev, "%s\n", __func__); 29 dev_dbg(dev, "trace\n");
30 nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns); 30 nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
31 ida_simple_remove(&nd_region->btt_ida, nd_btt->id); 31 ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
32 kfree(nd_btt->uuid); 32 kfree(nd_btt->uuid);
@@ -74,8 +74,8 @@ static ssize_t sector_size_store(struct device *dev,
74 nvdimm_bus_lock(dev); 74 nvdimm_bus_lock(dev);
75 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, 75 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
76 btt_lbasize_supported); 76 btt_lbasize_supported);
77 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 77 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
78 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 78 buf[len - 1] == '\n' ? "" : "\n");
79 nvdimm_bus_unlock(dev); 79 nvdimm_bus_unlock(dev);
80 device_unlock(dev); 80 device_unlock(dev);
81 81
@@ -101,8 +101,8 @@ static ssize_t uuid_store(struct device *dev,
101 101
102 device_lock(dev); 102 device_lock(dev);
103 rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); 103 rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
104 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 104 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
105 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 105 buf[len - 1] == '\n' ? "" : "\n");
106 device_unlock(dev); 106 device_unlock(dev);
107 107
108 return rc ? rc : len; 108 return rc ? rc : len;
@@ -131,8 +131,8 @@ static ssize_t namespace_store(struct device *dev,
131 device_lock(dev); 131 device_lock(dev);
132 nvdimm_bus_lock(dev); 132 nvdimm_bus_lock(dev);
133 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); 133 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
134 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 134 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
135 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 135 buf[len - 1] == '\n' ? "" : "\n");
136 nvdimm_bus_unlock(dev); 136 nvdimm_bus_unlock(dev);
137 device_unlock(dev); 137 device_unlock(dev);
138 138
@@ -206,8 +206,8 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
206 dev->groups = nd_btt_attribute_groups; 206 dev->groups = nd_btt_attribute_groups;
207 device_initialize(&nd_btt->dev); 207 device_initialize(&nd_btt->dev);
208 if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) { 208 if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
209 dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", 209 dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
210 __func__, dev_name(ndns->claim)); 210 dev_name(ndns->claim));
211 put_device(dev); 211 put_device(dev);
212 return NULL; 212 return NULL;
213 } 213 }
@@ -346,8 +346,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
346 return -ENOMEM; 346 return -ENOMEM;
347 btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL); 347 btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
348 rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb); 348 rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
349 dev_dbg(dev, "%s: btt: %s\n", __func__, 349 dev_dbg(dev, "btt: %s\n", rc == 0 ? dev_name(btt_dev) : "<none>");
350 rc == 0 ? dev_name(btt_dev) : "<none>");
351 if (rc < 0) { 350 if (rc < 0) {
352 struct nd_btt *nd_btt = to_nd_btt(btt_dev); 351 struct nd_btt *nd_btt = to_nd_btt(btt_dev);
353 352
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 78eabc3a1ab1..a64023690cad 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -358,6 +358,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
358 nvdimm_bus->dev.release = nvdimm_bus_release; 358 nvdimm_bus->dev.release = nvdimm_bus_release;
359 nvdimm_bus->dev.groups = nd_desc->attr_groups; 359 nvdimm_bus->dev.groups = nd_desc->attr_groups;
360 nvdimm_bus->dev.bus = &nvdimm_bus_type; 360 nvdimm_bus->dev.bus = &nvdimm_bus_type;
361 nvdimm_bus->dev.of_node = nd_desc->of_node;
361 dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id); 362 dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
362 rc = device_register(&nvdimm_bus->dev); 363 rc = device_register(&nvdimm_bus->dev);
363 if (rc) { 364 if (rc) {
@@ -984,8 +985,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
984 985
985 if (cmd == ND_CMD_CALL) { 986 if (cmd == ND_CMD_CALL) {
986 func = pkg.nd_command; 987 func = pkg.nd_command;
987 dev_dbg(dev, "%s:%s, idx: %llu, in: %u, out: %u, len %llu\n", 988 dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
988 __func__, dimm_name, pkg.nd_command, 989 dimm_name, pkg.nd_command,
989 in_len, out_len, buf_len); 990 in_len, out_len, buf_len);
990 } 991 }
991 992
@@ -996,8 +997,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
996 u32 copy; 997 u32 copy;
997 998
998 if (out_size == UINT_MAX) { 999 if (out_size == UINT_MAX) {
999 dev_dbg(dev, "%s:%s unknown output size cmd: %s field: %d\n", 1000 dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
1000 __func__, dimm_name, cmd_name, i); 1001 dimm_name, cmd_name, i);
1001 return -EFAULT; 1002 return -EFAULT;
1002 } 1003 }
1003 if (out_len < sizeof(out_env)) 1004 if (out_len < sizeof(out_env))
@@ -1012,9 +1013,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1012 1013
1013 buf_len = (u64) out_len + (u64) in_len; 1014 buf_len = (u64) out_len + (u64) in_len;
1014 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 1015 if (buf_len > ND_IOCTL_MAX_BUFLEN) {
1015 dev_dbg(dev, "%s:%s cmd: %s buf_len: %llu > %d\n", __func__, 1016 dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
1016 dimm_name, cmd_name, buf_len, 1017 cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
1017 ND_IOCTL_MAX_BUFLEN);
1018 return -EINVAL; 1018 return -EINVAL;
1019 } 1019 }
1020 1020
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index b2fc29b8279b..30852270484f 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -148,7 +148,7 @@ ssize_t nd_namespace_store(struct device *dev,
148 char *name; 148 char *name;
149 149
150 if (dev->driver) { 150 if (dev->driver) {
151 dev_dbg(dev, "%s: -EBUSY\n", __func__); 151 dev_dbg(dev, "namespace already active\n");
152 return -EBUSY; 152 return -EBUSY;
153 } 153 }
154 154
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 1dc527660637..acce050856a8 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -134,7 +134,7 @@ static void nvdimm_map_release(struct kref *kref)
134 nvdimm_map = container_of(kref, struct nvdimm_map, kref); 134 nvdimm_map = container_of(kref, struct nvdimm_map, kref);
135 nvdimm_bus = nvdimm_map->nvdimm_bus; 135 nvdimm_bus = nvdimm_map->nvdimm_bus;
136 136
137 dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset); 137 dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
138 list_del(&nvdimm_map->list); 138 list_del(&nvdimm_map->list);
139 if (nvdimm_map->flags) 139 if (nvdimm_map->flags)
140 memunmap(nvdimm_map->mem); 140 memunmap(nvdimm_map->mem);
@@ -230,8 +230,8 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
230 230
231 for (i = 0; i < 16; i++) { 231 for (i = 0; i < 16; i++) {
232 if (!isxdigit(str[0]) || !isxdigit(str[1])) { 232 if (!isxdigit(str[0]) || !isxdigit(str[1])) {
233 dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n", 233 dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
234 __func__, i, str - buf, str[0], 234 i, str - buf, str[0],
235 str + 1 - buf, str[1]); 235 str + 1 - buf, str[1]);
236 return -EINVAL; 236 return -EINVAL;
237 } 237 }
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 1bf2bd318371..0453f49dc708 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -24,7 +24,7 @@ static void nd_dax_release(struct device *dev)
24 struct nd_dax *nd_dax = to_nd_dax(dev); 24 struct nd_dax *nd_dax = to_nd_dax(dev);
25 struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; 25 struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
26 26
27 dev_dbg(dev, "%s\n", __func__); 27 dev_dbg(dev, "trace\n");
28 nd_detach_ndns(dev, &nd_pfn->ndns); 28 nd_detach_ndns(dev, &nd_pfn->ndns);
29 ida_simple_remove(&nd_region->dax_ida, nd_pfn->id); 29 ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
30 kfree(nd_pfn->uuid); 30 kfree(nd_pfn->uuid);
@@ -129,8 +129,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
129 pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); 129 pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
130 nd_pfn->pfn_sb = pfn_sb; 130 nd_pfn->pfn_sb = pfn_sb;
131 rc = nd_pfn_validate(nd_pfn, DAX_SIG); 131 rc = nd_pfn_validate(nd_pfn, DAX_SIG);
132 dev_dbg(dev, "%s: dax: %s\n", __func__, 132 dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
133 rc == 0 ? dev_name(dax_dev) : "<none>");
134 if (rc < 0) { 133 if (rc < 0) {
135 nd_detach_ndns(dax_dev, &nd_pfn->ndns); 134 nd_detach_ndns(dax_dev, &nd_pfn->ndns);
136 put_device(dax_dev); 135 put_device(dax_dev);
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index f8913b8124b6..233907889f96 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev)
67 ndd->ns_next = nd_label_next_nsindex(ndd->ns_current); 67 ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
68 nd_label_copy(ndd, to_next_namespace_index(ndd), 68 nd_label_copy(ndd, to_next_namespace_index(ndd),
69 to_current_namespace_index(ndd)); 69 to_current_namespace_index(ndd));
70 rc = nd_label_reserve_dpa(ndd); 70 if (ndd->ns_current >= 0) {
71 if (ndd->ns_current >= 0) 71 rc = nd_label_reserve_dpa(ndd);
72 nvdimm_set_aliasing(dev); 72 if (rc == 0)
73 nvdimm_set_aliasing(dev);
74 }
73 nvdimm_clear_locked(dev); 75 nvdimm_clear_locked(dev);
74 nvdimm_bus_unlock(dev); 76 nvdimm_bus_unlock(dev);
75 77
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 097794d9f786..e00d45522b80 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -131,7 +131,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
131 } 131 }
132 memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); 132 memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
133 } 133 }
134 dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc); 134 dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
135 kfree(cmd); 135 kfree(cmd);
136 136
137 return rc; 137 return rc;
@@ -266,8 +266,7 @@ void nvdimm_drvdata_release(struct kref *kref)
266 struct device *dev = ndd->dev; 266 struct device *dev = ndd->dev;
267 struct resource *res, *_r; 267 struct resource *res, *_r;
268 268
269 dev_dbg(dev, "%s\n", __func__); 269 dev_dbg(dev, "trace\n");
270
271 nvdimm_bus_lock(dev); 270 nvdimm_bus_lock(dev);
272 for_each_dpa_resource_safe(ndd, res, _r) 271 for_each_dpa_resource_safe(ndd, res, _r)
273 nvdimm_free_dpa(ndd, res); 272 nvdimm_free_dpa(ndd, res);
@@ -660,7 +659,7 @@ int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
660 nd_synchronize(); 659 nd_synchronize();
661 660
662 device_for_each_child(&nvdimm_bus->dev, &count, count_dimms); 661 device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
663 dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count); 662 dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
664 if (count != dimm_count) 663 if (count != dimm_count)
665 return -ENXIO; 664 return -ENXIO;
666 return 0; 665 return 0;
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index de66c02f6140..1d28cd656536 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -45,9 +45,27 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
45 return ndd->nslabel_size; 45 return ndd->nslabel_size;
46} 46}
47 47
48static size_t __sizeof_namespace_index(u32 nslot)
49{
50 return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
51 NSINDEX_ALIGN);
52}
53
54static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
55 size_t index_size)
56{
57 return (ndd->nsarea.config_size - index_size * 2) /
58 sizeof_namespace_label(ndd);
59}
60
48int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd) 61int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
49{ 62{
50 return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1); 63 u32 tmp_nslot, n;
64
65 tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
66 n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;
67
68 return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
51} 69}
52 70
53size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd) 71size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
@@ -55,18 +73,14 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
55 u32 nslot, space, size; 73 u32 nslot, space, size;
56 74
57 /* 75 /*
58 * The minimum index space is 512 bytes, with that amount of 76 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
59 * index we can describe ~1400 labels which is less than a byte 77 * enough to hold 2 index blocks and 2 labels. The minimum index
60 * of overhead per label. Round up to a byte of overhead per 78 * block size is 256 bytes, and the minimum label size is 256 bytes.
61 * label and determine the size of the index region. Yes, this
62 * starts to waste space at larger config_sizes, but it's
63 * unlikely we'll ever see anything but 128K.
64 */ 79 */
65 nslot = nvdimm_num_label_slots(ndd); 80 nslot = nvdimm_num_label_slots(ndd);
66 space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd); 81 space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
67 size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8), 82 size = __sizeof_namespace_index(nslot) * 2;
68 NSINDEX_ALIGN) * 2; 83 if (size <= space && nslot >= 2)
69 if (size <= space)
70 return size / 2; 84 return size / 2;
71 85
72 dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n", 86 dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
@@ -121,8 +135,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
121 135
122 memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN); 136 memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
123 if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) { 137 if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
124 dev_dbg(dev, "%s: nsindex%d signature invalid\n", 138 dev_dbg(dev, "nsindex%d signature invalid\n", i);
125 __func__, i);
126 continue; 139 continue;
127 } 140 }
128 141
@@ -135,8 +148,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
135 labelsize = 128; 148 labelsize = 128;
136 149
137 if (labelsize != sizeof_namespace_label(ndd)) { 150 if (labelsize != sizeof_namespace_label(ndd)) {
138 dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n", 151 dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
139 __func__, i, nsindex[i]->labelsize); 152 i, nsindex[i]->labelsize);
140 continue; 153 continue;
141 } 154 }
142 155
@@ -145,30 +158,28 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
145 sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1); 158 sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
146 nsindex[i]->checksum = __cpu_to_le64(sum_save); 159 nsindex[i]->checksum = __cpu_to_le64(sum_save);
147 if (sum != sum_save) { 160 if (sum != sum_save) {
148 dev_dbg(dev, "%s: nsindex%d checksum invalid\n", 161 dev_dbg(dev, "nsindex%d checksum invalid\n", i);
149 __func__, i);
150 continue; 162 continue;
151 } 163 }
152 164
153 seq = __le32_to_cpu(nsindex[i]->seq); 165 seq = __le32_to_cpu(nsindex[i]->seq);
154 if ((seq & NSINDEX_SEQ_MASK) == 0) { 166 if ((seq & NSINDEX_SEQ_MASK) == 0) {
155 dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n", 167 dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
156 __func__, i, seq);
157 continue; 168 continue;
158 } 169 }
159 170
160 /* sanity check the index against expected values */ 171 /* sanity check the index against expected values */
161 if (__le64_to_cpu(nsindex[i]->myoff) 172 if (__le64_to_cpu(nsindex[i]->myoff)
162 != i * sizeof_namespace_index(ndd)) { 173 != i * sizeof_namespace_index(ndd)) {
163 dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n", 174 dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
164 __func__, i, (unsigned long long) 175 i, (unsigned long long)
165 __le64_to_cpu(nsindex[i]->myoff)); 176 __le64_to_cpu(nsindex[i]->myoff));
166 continue; 177 continue;
167 } 178 }
168 if (__le64_to_cpu(nsindex[i]->otheroff) 179 if (__le64_to_cpu(nsindex[i]->otheroff)
169 != (!i) * sizeof_namespace_index(ndd)) { 180 != (!i) * sizeof_namespace_index(ndd)) {
170 dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n", 181 dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
171 __func__, i, (unsigned long long) 182 i, (unsigned long long)
172 __le64_to_cpu(nsindex[i]->otheroff)); 183 __le64_to_cpu(nsindex[i]->otheroff));
173 continue; 184 continue;
174 } 185 }
@@ -176,8 +187,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
176 size = __le64_to_cpu(nsindex[i]->mysize); 187 size = __le64_to_cpu(nsindex[i]->mysize);
177 if (size > sizeof_namespace_index(ndd) 188 if (size > sizeof_namespace_index(ndd)
178 || size < sizeof(struct nd_namespace_index)) { 189 || size < sizeof(struct nd_namespace_index)) {
179 dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n", 190 dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
180 __func__, i, size);
181 continue; 191 continue;
182 } 192 }
183 193
@@ -185,9 +195,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
185 if (nslot * sizeof_namespace_label(ndd) 195 if (nslot * sizeof_namespace_label(ndd)
186 + 2 * sizeof_namespace_index(ndd) 196 + 2 * sizeof_namespace_index(ndd)
187 > ndd->nsarea.config_size) { 197 > ndd->nsarea.config_size) {
188 dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n", 198 dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
189 __func__, i, nslot, 199 i, nslot, ndd->nsarea.config_size);
190 ndd->nsarea.config_size);
191 continue; 200 continue;
192 } 201 }
193 valid[i] = true; 202 valid[i] = true;
@@ -356,8 +365,8 @@ static bool slot_valid(struct nvdimm_drvdata *ndd,
356 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); 365 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
357 nd_label->checksum = __cpu_to_le64(sum_save); 366 nd_label->checksum = __cpu_to_le64(sum_save);
358 if (sum != sum_save) { 367 if (sum != sum_save) {
359 dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n", 368 dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
360 __func__, slot, sum); 369 slot, sum);
361 return false; 370 return false;
362 } 371 }
363 } 372 }
@@ -422,8 +431,8 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
422 u64 dpa = __le64_to_cpu(nd_label->dpa); 431 u64 dpa = __le64_to_cpu(nd_label->dpa);
423 432
424 dev_dbg(ndd->dev, 433 dev_dbg(ndd->dev,
425 "%s: slot%d invalid slot: %d dpa: %llx size: %llx\n", 434 "slot%d invalid slot: %d dpa: %llx size: %llx\n",
426 __func__, slot, label_slot, dpa, size); 435 slot, label_slot, dpa, size);
427 continue; 436 continue;
428 } 437 }
429 count++; 438 count++;
@@ -650,7 +659,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
650 slot = nd_label_alloc_slot(ndd); 659 slot = nd_label_alloc_slot(ndd);
651 if (slot == UINT_MAX) 660 if (slot == UINT_MAX)
652 return -ENXIO; 661 return -ENXIO;
653 dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot); 662 dev_dbg(ndd->dev, "allocated: %d\n", slot);
654 663
655 nd_label = to_label(ndd, slot); 664 nd_label = to_label(ndd, slot);
656 memset(nd_label, 0, sizeof_namespace_label(ndd)); 665 memset(nd_label, 0, sizeof_namespace_label(ndd));
@@ -678,7 +687,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
678 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); 687 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
679 nd_label->checksum = __cpu_to_le64(sum); 688 nd_label->checksum = __cpu_to_le64(sum);
680 } 689 }
681 nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__); 690 nd_dbg_dpa(nd_region, ndd, res, "\n");
682 691
683 /* update label */ 692 /* update label */
684 offset = nd_label_offset(ndd, nd_label); 693 offset = nd_label_offset(ndd, nd_label);
@@ -700,7 +709,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
700 break; 709 break;
701 } 710 }
702 if (victim) { 711 if (victim) {
703 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); 712 dev_dbg(ndd->dev, "free: %d\n", slot);
704 slot = to_slot(ndd, victim->label); 713 slot = to_slot(ndd, victim->label);
705 nd_label_free_slot(ndd, slot); 714 nd_label_free_slot(ndd, slot);
706 victim->label = NULL; 715 victim->label = NULL;
@@ -868,7 +877,7 @@ static int __blk_label_update(struct nd_region *nd_region,
868 slot = nd_label_alloc_slot(ndd); 877 slot = nd_label_alloc_slot(ndd);
869 if (slot == UINT_MAX) 878 if (slot == UINT_MAX)
870 goto abort; 879 goto abort;
871 dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot); 880 dev_dbg(ndd->dev, "allocated: %d\n", slot);
872 881
873 nd_label = to_label(ndd, slot); 882 nd_label = to_label(ndd, slot);
874 memset(nd_label, 0, sizeof_namespace_label(ndd)); 883 memset(nd_label, 0, sizeof_namespace_label(ndd));
@@ -928,7 +937,7 @@ static int __blk_label_update(struct nd_region *nd_region,
928 937
929 /* free up now unused slots in the new index */ 938 /* free up now unused slots in the new index */
930 for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) { 939 for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
931 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); 940 dev_dbg(ndd->dev, "free: %d\n", slot);
932 nd_label_free_slot(ndd, slot); 941 nd_label_free_slot(ndd, slot);
933 } 942 }
934 943
@@ -1092,7 +1101,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1092 active--; 1101 active--;
1093 slot = to_slot(ndd, nd_label); 1102 slot = to_slot(ndd, nd_label);
1094 nd_label_free_slot(ndd, slot); 1103 nd_label_free_slot(ndd, slot);
1095 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); 1104 dev_dbg(ndd->dev, "free: %d\n", slot);
1096 list_move_tail(&label_ent->list, &list); 1105 list_move_tail(&label_ent->list, &list);
1097 label_ent->label = NULL; 1106 label_ent->label = NULL;
1098 } 1107 }
@@ -1100,7 +1109,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1100 1109
1101 if (active == 0) { 1110 if (active == 0) {
1102 nd_mapping_free_labels(nd_mapping); 1111 nd_mapping_free_labels(nd_mapping);
1103 dev_dbg(ndd->dev, "%s: no more active labels\n", __func__); 1112 dev_dbg(ndd->dev, "no more active labels\n");
1104 } 1113 }
1105 mutex_unlock(&nd_mapping->lock); 1114 mutex_unlock(&nd_mapping->lock);
1106 1115
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 1ebf4d3d01ba..18bbe183b3a9 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -33,7 +33,7 @@ enum {
33 BTTINFO_UUID_LEN = 16, 33 BTTINFO_UUID_LEN = 16,
34 BTTINFO_FLAG_ERROR = 0x1, /* error state (read-only) */ 34 BTTINFO_FLAG_ERROR = 0x1, /* error state (read-only) */
35 BTTINFO_MAJOR_VERSION = 1, 35 BTTINFO_MAJOR_VERSION = 1,
36 ND_LABEL_MIN_SIZE = 512 * 129, /* see sizeof_namespace_index() */ 36 ND_LABEL_MIN_SIZE = 256 * 4, /* see sizeof_namespace_index() */
37 ND_LABEL_ID_SIZE = 50, 37 ND_LABEL_ID_SIZE = 50,
38 ND_NSINDEX_INIT = 0x1, 38 ND_NSINDEX_INIT = 0x1,
39}; 39};
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 658ada497be0..28afdd668905 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -421,7 +421,7 @@ static ssize_t alt_name_store(struct device *dev,
421 rc = __alt_name_store(dev, buf, len); 421 rc = __alt_name_store(dev, buf, len);
422 if (rc >= 0) 422 if (rc >= 0)
423 rc = nd_namespace_label_update(nd_region, dev); 423 rc = nd_namespace_label_update(nd_region, dev);
424 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc); 424 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
425 nvdimm_bus_unlock(dev); 425 nvdimm_bus_unlock(dev);
426 device_unlock(dev); 426 device_unlock(dev);
427 427
@@ -1007,7 +1007,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1007 if (uuid_not_set(uuid, dev, __func__)) 1007 if (uuid_not_set(uuid, dev, __func__))
1008 return -ENXIO; 1008 return -ENXIO;
1009 if (nd_region->ndr_mappings == 0) { 1009 if (nd_region->ndr_mappings == 0) {
1010 dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__); 1010 dev_dbg(dev, "not associated with dimm(s)\n");
1011 return -ENXIO; 1011 return -ENXIO;
1012 } 1012 }
1013 1013
@@ -1105,8 +1105,7 @@ static ssize_t size_store(struct device *dev,
1105 *uuid = NULL; 1105 *uuid = NULL;
1106 } 1106 }
1107 1107
1108 dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0 1108 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
1109 ? "fail" : "success", rc);
1110 1109
1111 nvdimm_bus_unlock(dev); 1110 nvdimm_bus_unlock(dev);
1112 device_unlock(dev); 1111 device_unlock(dev);
@@ -1270,8 +1269,8 @@ static ssize_t uuid_store(struct device *dev,
1270 rc = nd_namespace_label_update(nd_region, dev); 1269 rc = nd_namespace_label_update(nd_region, dev);
1271 else 1270 else
1272 kfree(uuid); 1271 kfree(uuid);
1273 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 1272 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
1274 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 1273 buf[len - 1] == '\n' ? "" : "\n");
1275 nvdimm_bus_unlock(dev); 1274 nvdimm_bus_unlock(dev);
1276 device_unlock(dev); 1275 device_unlock(dev);
1277 1276
@@ -1355,9 +1354,8 @@ static ssize_t sector_size_store(struct device *dev,
1355 rc = nd_size_select_store(dev, buf, lbasize, supported); 1354 rc = nd_size_select_store(dev, buf, lbasize, supported);
1356 if (rc >= 0) 1355 if (rc >= 0)
1357 rc = nd_namespace_label_update(nd_region, dev); 1356 rc = nd_namespace_label_update(nd_region, dev);
1358 dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__, 1357 dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
1359 rc, rc < 0 ? "tried" : "wrote", buf, 1358 buf, buf[len - 1] == '\n' ? "" : "\n");
1360 buf[len - 1] == '\n' ? "" : "\n");
1361 nvdimm_bus_unlock(dev); 1359 nvdimm_bus_unlock(dev);
1362 device_unlock(dev); 1360 device_unlock(dev);
1363 1361
@@ -1519,7 +1517,7 @@ static ssize_t holder_class_store(struct device *dev,
1519 rc = __holder_class_store(dev, buf); 1517 rc = __holder_class_store(dev, buf);
1520 if (rc >= 0) 1518 if (rc >= 0)
1521 rc = nd_namespace_label_update(nd_region, dev); 1519 rc = nd_namespace_label_update(nd_region, dev);
1522 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc); 1520 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
1523 nvdimm_bus_unlock(dev); 1521 nvdimm_bus_unlock(dev);
1524 device_unlock(dev); 1522 device_unlock(dev);
1525 1523
@@ -1717,8 +1715,7 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1717 if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__)) 1715 if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
1718 return ERR_PTR(-ENODEV); 1716 return ERR_PTR(-ENODEV);
1719 if (!nsblk->lbasize) { 1717 if (!nsblk->lbasize) {
1720 dev_dbg(&ndns->dev, "%s: sector size not set\n", 1718 dev_dbg(&ndns->dev, "sector size not set\n");
1721 __func__);
1722 return ERR_PTR(-ENODEV); 1719 return ERR_PTR(-ENODEV);
1723 } 1720 }
1724 if (!nd_namespace_blk_validate(nsblk)) 1721 if (!nd_namespace_blk_validate(nsblk))
@@ -1798,9 +1795,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1798 } 1795 }
1799 1796
1800 if (found_uuid) { 1797 if (found_uuid) {
1801 dev_dbg(ndd->dev, 1798 dev_dbg(ndd->dev, "duplicate entry for uuid\n");
1802 "%s duplicate entry for uuid\n",
1803 __func__);
1804 return false; 1799 return false;
1805 } 1800 }
1806 found_uuid = true; 1801 found_uuid = true;
@@ -1926,7 +1921,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
1926 } 1921 }
1927 1922
1928 if (i < nd_region->ndr_mappings) { 1923 if (i < nd_region->ndr_mappings) {
1929 struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); 1924 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
1930 1925
1931 /* 1926 /*
1932 * Give up if we don't find an instance of a uuid at each 1927 * Give up if we don't find an instance of a uuid at each
@@ -1934,7 +1929,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
1934 * find a dimm with two instances of the same uuid. 1929 * find a dimm with two instances of the same uuid.
1935 */ 1930 */
1936 dev_err(&nd_region->dev, "%s missing label for %pUb\n", 1931 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
1937 dev_name(ndd->dev), nd_label->uuid); 1932 nvdimm_name(nvdimm), nd_label->uuid);
1938 rc = -EINVAL; 1933 rc = -EINVAL;
1939 goto err; 1934 goto err;
1940 } 1935 }
@@ -1994,14 +1989,13 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
1994 namespace_pmem_release(dev); 1989 namespace_pmem_release(dev);
1995 switch (rc) { 1990 switch (rc) {
1996 case -EINVAL: 1991 case -EINVAL:
1997 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__); 1992 dev_dbg(&nd_region->dev, "invalid label(s)\n");
1998 break; 1993 break;
1999 case -ENODEV: 1994 case -ENODEV:
2000 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__); 1995 dev_dbg(&nd_region->dev, "label not found\n");
2001 break; 1996 break;
2002 default: 1997 default:
2003 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n", 1998 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
2004 __func__, rc);
2005 break; 1999 break;
2006 } 2000 }
2007 return ERR_PTR(rc); 2001 return ERR_PTR(rc);
@@ -2334,8 +2328,8 @@ static struct device **scan_labels(struct nd_region *nd_region)
2334 2328
2335 } 2329 }
2336 2330
2337 dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n", 2331 dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
2338 __func__, count, is_nd_blk(&nd_region->dev) 2332 count, is_nd_blk(&nd_region->dev)
2339 ? "blk" : "pmem", count == 1 ? "" : "s"); 2333 ? "blk" : "pmem", count == 1 ? "" : "s");
2340 2334
2341 if (count == 0) { 2335 if (count == 0) {
@@ -2467,7 +2461,7 @@ static int init_active_labels(struct nd_region *nd_region)
2467 get_ndd(ndd); 2461 get_ndd(ndd);
2468 2462
2469 count = nd_label_active_count(ndd); 2463 count = nd_label_active_count(ndd);
2470 dev_dbg(ndd->dev, "%s: %d\n", __func__, count); 2464 dev_dbg(ndd->dev, "count: %d\n", count);
2471 if (!count) 2465 if (!count)
2472 continue; 2466 continue;
2473 for (j = 0; j < count; j++) { 2467 for (j = 0; j < count; j++) {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 8d6375ee0fda..9dad5d737309 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -341,7 +341,6 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region)
341} 341}
342#endif 342#endif
343 343
344struct nd_region *to_nd_region(struct device *dev);
345int nd_region_to_nstype(struct nd_region *nd_region); 344int nd_region_to_nstype(struct nd_region *nd_region);
346int nd_region_register_namespaces(struct nd_region *nd_region, int *err); 345int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
347u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, 346u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
new file mode 100644
index 000000000000..85013bad35de
--- /dev/null
+++ b/drivers/nvdimm/of_pmem.c
@@ -0,0 +1,119 @@
1// SPDX-License-Identifier: GPL-2.0+
2
3#define pr_fmt(fmt) "of_pmem: " fmt
4
5#include <linux/of_platform.h>
6#include <linux/of_address.h>
7#include <linux/libnvdimm.h>
8#include <linux/module.h>
9#include <linux/ioport.h>
10#include <linux/slab.h>
11
12static const struct attribute_group *region_attr_groups[] = {
13 &nd_region_attribute_group,
14 &nd_device_attribute_group,
15 NULL,
16};
17
18static const struct attribute_group *bus_attr_groups[] = {
19 &nvdimm_bus_attribute_group,
20 NULL,
21};
22
23struct of_pmem_private {
24 struct nvdimm_bus_descriptor bus_desc;
25 struct nvdimm_bus *bus;
26};
27
28static int of_pmem_region_probe(struct platform_device *pdev)
29{
30 struct of_pmem_private *priv;
31 struct device_node *np;
32 struct nvdimm_bus *bus;
33 bool is_volatile;
34 int i;
35
36 np = dev_of_node(&pdev->dev);
37 if (!np)
38 return -ENXIO;
39
40 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
41 if (!priv)
42 return -ENOMEM;
43
44 priv->bus_desc.attr_groups = bus_attr_groups;
45 priv->bus_desc.provider_name = "of_pmem";
46 priv->bus_desc.module = THIS_MODULE;
47 priv->bus_desc.of_node = np;
48
49 priv->bus = bus = nvdimm_bus_register(&pdev->dev, &priv->bus_desc);
50 if (!bus) {
51 kfree(priv);
52 return -ENODEV;
53 }
54 platform_set_drvdata(pdev, priv);
55
56 is_volatile = !!of_find_property(np, "volatile", NULL);
57 dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
58 is_volatile ? "volatile" : "non-volatile", np);
59
60 for (i = 0; i < pdev->num_resources; i++) {
61 struct nd_region_desc ndr_desc;
62 struct nd_region *region;
63
64 /*
65 * NB: libnvdimm copies the data from ndr_desc into it's own
66 * structures so passing a stack pointer is fine.
67 */
68 memset(&ndr_desc, 0, sizeof(ndr_desc));
69 ndr_desc.attr_groups = region_attr_groups;
70 ndr_desc.numa_node = of_node_to_nid(np);
71 ndr_desc.res = &pdev->resource[i];
72 ndr_desc.of_node = np;
73 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
74
75 if (is_volatile)
76 region = nvdimm_volatile_region_create(bus, &ndr_desc);
77 else
78 region = nvdimm_pmem_region_create(bus, &ndr_desc);
79
80 if (!region)
81 dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
82 ndr_desc.res, np);
83 else
84 dev_dbg(&pdev->dev, "Registered region %pR from %pOF\n",
85 ndr_desc.res, np);
86 }
87
88 return 0;
89}
90
91static int of_pmem_region_remove(struct platform_device *pdev)
92{
93 struct of_pmem_private *priv = platform_get_drvdata(pdev);
94
95 nvdimm_bus_unregister(priv->bus);
96 kfree(priv);
97
98 return 0;
99}
100
101static const struct of_device_id of_pmem_region_match[] = {
102 { .compatible = "pmem-region" },
103 { },
104};
105
106static struct platform_driver of_pmem_region_driver = {
107 .probe = of_pmem_region_probe,
108 .remove = of_pmem_region_remove,
109 .driver = {
110 .name = "of_pmem",
111 .owner = THIS_MODULE,
112 .of_match_table = of_pmem_region_match,
113 },
114};
115
116module_platform_driver(of_pmem_region_driver);
117MODULE_DEVICE_TABLE(of, of_pmem_region_match);
118MODULE_LICENSE("GPL");
119MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 2f4d18752c97..30b08791597d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -27,7 +27,7 @@ static void nd_pfn_release(struct device *dev)
27 struct nd_region *nd_region = to_nd_region(dev->parent); 27 struct nd_region *nd_region = to_nd_region(dev->parent);
28 struct nd_pfn *nd_pfn = to_nd_pfn(dev); 28 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
29 29
30 dev_dbg(dev, "%s\n", __func__); 30 dev_dbg(dev, "trace\n");
31 nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns); 31 nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
32 ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id); 32 ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
33 kfree(nd_pfn->uuid); 33 kfree(nd_pfn->uuid);
@@ -94,8 +94,8 @@ static ssize_t mode_store(struct device *dev,
94 else 94 else
95 rc = -EINVAL; 95 rc = -EINVAL;
96 } 96 }
97 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 97 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
98 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 98 buf[len - 1] == '\n' ? "" : "\n");
99 nvdimm_bus_unlock(dev); 99 nvdimm_bus_unlock(dev);
100 device_unlock(dev); 100 device_unlock(dev);
101 101
@@ -144,8 +144,8 @@ static ssize_t align_store(struct device *dev,
144 nvdimm_bus_lock(dev); 144 nvdimm_bus_lock(dev);
145 rc = nd_size_select_store(dev, buf, &nd_pfn->align, 145 rc = nd_size_select_store(dev, buf, &nd_pfn->align,
146 nd_pfn_supported_alignments()); 146 nd_pfn_supported_alignments());
147 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 147 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
148 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 148 buf[len - 1] == '\n' ? "" : "\n");
149 nvdimm_bus_unlock(dev); 149 nvdimm_bus_unlock(dev);
150 device_unlock(dev); 150 device_unlock(dev);
151 151
@@ -171,8 +171,8 @@ static ssize_t uuid_store(struct device *dev,
171 171
172 device_lock(dev); 172 device_lock(dev);
173 rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); 173 rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
174 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 174 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
175 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 175 buf[len - 1] == '\n' ? "" : "\n");
176 device_unlock(dev); 176 device_unlock(dev);
177 177
178 return rc ? rc : len; 178 return rc ? rc : len;
@@ -201,8 +201,8 @@ static ssize_t namespace_store(struct device *dev,
201 device_lock(dev); 201 device_lock(dev);
202 nvdimm_bus_lock(dev); 202 nvdimm_bus_lock(dev);
203 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 203 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
204 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, 204 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
205 rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 205 buf[len - 1] == '\n' ? "" : "\n");
206 nvdimm_bus_unlock(dev); 206 nvdimm_bus_unlock(dev);
207 device_unlock(dev); 207 device_unlock(dev);
208 208
@@ -314,8 +314,8 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
314 dev = &nd_pfn->dev; 314 dev = &nd_pfn->dev;
315 device_initialize(&nd_pfn->dev); 315 device_initialize(&nd_pfn->dev);
316 if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { 316 if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
317 dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", 317 dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
318 __func__, dev_name(ndns->claim)); 318 dev_name(ndns->claim));
319 put_device(dev); 319 put_device(dev);
320 return NULL; 320 return NULL;
321 } 321 }
@@ -510,8 +510,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
510 nd_pfn = to_nd_pfn(pfn_dev); 510 nd_pfn = to_nd_pfn(pfn_dev);
511 nd_pfn->pfn_sb = pfn_sb; 511 nd_pfn->pfn_sb = pfn_sb;
512 rc = nd_pfn_validate(nd_pfn, PFN_SIG); 512 rc = nd_pfn_validate(nd_pfn, PFN_SIG);
513 dev_dbg(dev, "%s: pfn: %s\n", __func__, 513 dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
514 rc == 0 ? dev_name(pfn_dev) : "<none>");
515 if (rc < 0) { 514 if (rc < 0) {
516 nd_detach_ndns(pfn_dev, &nd_pfn->ndns); 515 nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
517 put_device(pfn_dev); 516 put_device(pfn_dev);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 06f8dcc52ca6..85dfb3dc3981 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -66,7 +66,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
66 rc = BLK_STS_IOERR; 66 rc = BLK_STS_IOERR;
67 if (cleared > 0 && cleared / 512) { 67 if (cleared > 0 && cleared / 512) {
68 cleared /= 512; 68 cleared /= 512;
69 dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, 69 dev_dbg(dev, "%#llx clear %ld sector%s\n",
70 (unsigned long long) sector, cleared, 70 (unsigned long long) sector, cleared,
71 cleared > 1 ? "s" : ""); 71 cleared > 1 ? "s" : "");
72 badblocks_clear(&pmem->bb, sector, cleared); 72 badblocks_clear(&pmem->bb, sector, cleared);
@@ -547,17 +547,7 @@ static struct nd_device_driver nd_pmem_driver = {
547 .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM, 547 .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
548}; 548};
549 549
550static int __init pmem_init(void) 550module_nd_driver(nd_pmem_driver);
551{
552 return nd_driver_register(&nd_pmem_driver);
553}
554module_init(pmem_init);
555
556static void pmem_exit(void)
557{
558 driver_unregister(&nd_pmem_driver.drv);
559}
560module_exit(pmem_exit);
561 551
562MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>"); 552MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
563MODULE_LICENSE("GPL v2"); 553MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 034f0a07d627..b9ca0033cc99 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -27,10 +27,10 @@ static int nd_region_probe(struct device *dev)
27 if (nd_region->num_lanes > num_online_cpus() 27 if (nd_region->num_lanes > num_online_cpus()
28 && nd_region->num_lanes < num_possible_cpus() 28 && nd_region->num_lanes < num_possible_cpus()
29 && !test_and_set_bit(0, &once)) { 29 && !test_and_set_bit(0, &once)) {
30 dev_info(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n", 30 dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
31 num_online_cpus(), nd_region->num_lanes, 31 num_online_cpus(), nd_region->num_lanes,
32 num_possible_cpus()); 32 num_possible_cpus());
33 dev_info(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n", 33 dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
34 nd_region->num_lanes); 34 nd_region->num_lanes);
35 } 35 }
36 36
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 1593e1806b16..a612be6f019d 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -182,6 +182,14 @@ struct nd_region *to_nd_region(struct device *dev)
182} 182}
183EXPORT_SYMBOL_GPL(to_nd_region); 183EXPORT_SYMBOL_GPL(to_nd_region);
184 184
185struct device *nd_region_dev(struct nd_region *nd_region)
186{
187 if (!nd_region)
188 return NULL;
189 return &nd_region->dev;
190}
191EXPORT_SYMBOL_GPL(nd_region_dev);
192
185struct nd_blk_region *to_nd_blk_region(struct device *dev) 193struct nd_blk_region *to_nd_blk_region(struct device *dev)
186{ 194{
187 struct nd_region *nd_region = to_nd_region(dev); 195 struct nd_region *nd_region = to_nd_region(dev);
@@ -1014,6 +1022,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
1014 dev->parent = &nvdimm_bus->dev; 1022 dev->parent = &nvdimm_bus->dev;
1015 dev->type = dev_type; 1023 dev->type = dev_type;
1016 dev->groups = ndr_desc->attr_groups; 1024 dev->groups = ndr_desc->attr_groups;
1025 dev->of_node = ndr_desc->of_node;
1017 nd_region->ndr_size = resource_size(ndr_desc->res); 1026 nd_region->ndr_size = resource_size(ndr_desc->res);
1018 nd_region->ndr_start = ndr_desc->res->start; 1027 nd_region->ndr_start = ndr_desc->res->start;
1019 nd_device_register(dev); 1028 nd_device_register(dev);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index ff855ed965fb..097072c5a852 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -76,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
76 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 76 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
77 unsigned int buf_len, int *cmd_rc); 77 unsigned int buf_len, int *cmd_rc);
78 78
79struct device_node;
79struct nvdimm_bus_descriptor { 80struct nvdimm_bus_descriptor {
80 const struct attribute_group **attr_groups; 81 const struct attribute_group **attr_groups;
81 unsigned long bus_dsm_mask; 82 unsigned long bus_dsm_mask;
82 unsigned long cmd_mask; 83 unsigned long cmd_mask;
83 struct module *module; 84 struct module *module;
84 char *provider_name; 85 char *provider_name;
86 struct device_node *of_node;
85 ndctl_fn ndctl; 87 ndctl_fn ndctl;
86 int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); 88 int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
87 int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, 89 int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
@@ -123,6 +125,7 @@ struct nd_region_desc {
123 int num_lanes; 125 int num_lanes;
124 int numa_node; 126 int numa_node;
125 unsigned long flags; 127 unsigned long flags;
128 struct device_node *of_node;
126}; 129};
127 130
128struct device; 131struct device;
@@ -164,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
164struct nvdimm_bus *to_nvdimm_bus(struct device *dev); 167struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
165struct nvdimm *to_nvdimm(struct device *dev); 168struct nvdimm *to_nvdimm(struct device *dev);
166struct nd_region *to_nd_region(struct device *dev); 169struct nd_region *to_nd_region(struct device *dev);
170struct device *nd_region_dev(struct nd_region *nd_region);
167struct nd_blk_region *to_nd_blk_region(struct device *dev); 171struct nd_blk_region *to_nd_blk_region(struct device *dev);
168struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); 172struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
169struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); 173struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5dc6b695437d..43c181a6add5 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -180,6 +180,12 @@ struct nd_region;
180void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); 180void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
181int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, 181int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
182 struct module *module, const char *mod_name); 182 struct module *module, const char *mod_name);
183static inline void nd_driver_unregister(struct nd_device_driver *drv)
184{
185 driver_unregister(&drv->drv);
186}
183#define nd_driver_register(driver) \ 187#define nd_driver_register(driver) \
184 __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) 188 __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
189#define module_nd_driver(driver) \
190 module_driver(driver, nd_driver_register, nd_driver_unregister)
185#endif /* __LINUX_ND_H__ */ 191#endif /* __LINUX_ND_H__ */
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 620fa78b3b1b..cb166be4918d 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -104,7 +104,8 @@ enum {
104 NUM_HINTS = 8, 104 NUM_HINTS = 8,
105 NUM_BDW = NUM_DCR, 105 NUM_BDW = NUM_DCR,
106 NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW, 106 NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
107 NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */, 107 NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
108 + 4 /* spa1 iset */ + 1 /* spa11 iset */,
108 DIMM_SIZE = SZ_32M, 109 DIMM_SIZE = SZ_32M,
109 LABEL_SIZE = SZ_128K, 110 LABEL_SIZE = SZ_128K,
110 SPA_VCD_SIZE = SZ_4M, 111 SPA_VCD_SIZE = SZ_4M,
@@ -153,6 +154,7 @@ struct nfit_test {
153 void *nfit_buf; 154 void *nfit_buf;
154 dma_addr_t nfit_dma; 155 dma_addr_t nfit_dma;
155 size_t nfit_size; 156 size_t nfit_size;
157 size_t nfit_filled;
156 int dcr_idx; 158 int dcr_idx;
157 int num_dcr; 159 int num_dcr;
158 int num_pm; 160 int num_pm;
@@ -709,7 +711,9 @@ static void smart_notify(struct device *bus_dev,
709 >= thresh->media_temperature) 711 >= thresh->media_temperature)
710 || ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP) 712 || ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
711 && smart->ctrl_temperature 713 && smart->ctrl_temperature
712 >= thresh->ctrl_temperature)) { 714 >= thresh->ctrl_temperature)
715 || (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
716 || (smart->shutdown_state != 0)) {
713 device_lock(bus_dev); 717 device_lock(bus_dev);
714 __acpi_nvdimm_notify(dimm_dev, 0x81); 718 __acpi_nvdimm_notify(dimm_dev, 0x81);
715 device_unlock(bus_dev); 719 device_unlock(bus_dev);
@@ -735,6 +739,32 @@ static int nfit_test_cmd_smart_set_threshold(
735 return 0; 739 return 0;
736} 740}
737 741
742static int nfit_test_cmd_smart_inject(
743 struct nd_intel_smart_inject *inj,
744 unsigned int buf_len,
745 struct nd_intel_smart_threshold *thresh,
746 struct nd_intel_smart *smart,
747 struct device *bus_dev, struct device *dimm_dev)
748{
749 if (buf_len != sizeof(*inj))
750 return -EINVAL;
751
752 if (inj->mtemp_enable)
753 smart->media_temperature = inj->media_temperature;
754 if (inj->spare_enable)
755 smart->spares = inj->spares;
756 if (inj->fatal_enable)
757 smart->health = ND_INTEL_SMART_FATAL_HEALTH;
758 if (inj->unsafe_shutdown_enable) {
759 smart->shutdown_state = 1;
760 smart->shutdown_count++;
761 }
762 inj->status = 0;
763 smart_notify(bus_dev, dimm_dev, smart, thresh);
764
765 return 0;
766}
767
738static void uc_error_notify(struct work_struct *work) 768static void uc_error_notify(struct work_struct *work)
739{ 769{
740 struct nfit_test *t = container_of(work, typeof(*t), work); 770 struct nfit_test *t = container_of(work, typeof(*t), work);
@@ -935,6 +965,13 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
935 t->dcr_idx], 965 t->dcr_idx],
936 &t->smart[i - t->dcr_idx], 966 &t->smart[i - t->dcr_idx],
937 &t->pdev.dev, t->dimm_dev[i]); 967 &t->pdev.dev, t->dimm_dev[i]);
968 case ND_INTEL_SMART_INJECT:
969 return nfit_test_cmd_smart_inject(buf,
970 buf_len,
971 &t->smart_threshold[i -
972 t->dcr_idx],
973 &t->smart[i - t->dcr_idx],
974 &t->pdev.dev, t->dimm_dev[i]);
938 default: 975 default:
939 return -ENOTTY; 976 return -ENOTTY;
940 } 977 }
@@ -1222,7 +1259,7 @@ static void smart_init(struct nfit_test *t)
1222 | ND_INTEL_SMART_MTEMP_VALID, 1259 | ND_INTEL_SMART_MTEMP_VALID,
1223 .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH, 1260 .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
1224 .media_temperature = 23 * 16, 1261 .media_temperature = 23 * 16,
1225 .ctrl_temperature = 30 * 16, 1262 .ctrl_temperature = 25 * 16,
1226 .pmic_temperature = 40 * 16, 1263 .pmic_temperature = 40 * 16,
1227 .spares = 75, 1264 .spares = 75,
1228 .alarm_flags = ND_INTEL_SMART_SPARE_TRIP 1265 .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
@@ -1366,7 +1403,7 @@ static void nfit_test0_setup(struct nfit_test *t)
1366 struct acpi_nfit_data_region *bdw; 1403 struct acpi_nfit_data_region *bdw;
1367 struct acpi_nfit_flush_address *flush; 1404 struct acpi_nfit_flush_address *flush;
1368 struct acpi_nfit_capabilities *pcap; 1405 struct acpi_nfit_capabilities *pcap;
1369 unsigned int offset, i; 1406 unsigned int offset = 0, i;
1370 1407
1371 /* 1408 /*
1372 * spa0 (interleave first half of dimm0 and dimm1, note storage 1409 * spa0 (interleave first half of dimm0 and dimm1, note storage
@@ -1380,93 +1417,102 @@ static void nfit_test0_setup(struct nfit_test *t)
1380 spa->range_index = 0+1; 1417 spa->range_index = 0+1;
1381 spa->address = t->spa_set_dma[0]; 1418 spa->address = t->spa_set_dma[0];
1382 spa->length = SPA0_SIZE; 1419 spa->length = SPA0_SIZE;
1420 offset += spa->header.length;
1383 1421
1384 /* 1422 /*
1385 * spa1 (interleave last half of the 4 DIMMS, note storage 1423 * spa1 (interleave last half of the 4 DIMMS, note storage
1386 * does not actually alias the related block-data-window 1424 * does not actually alias the related block-data-window
1387 * regions) 1425 * regions)
1388 */ 1426 */
1389 spa = nfit_buf + sizeof(*spa); 1427 spa = nfit_buf + offset;
1390 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1428 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1391 spa->header.length = sizeof(*spa); 1429 spa->header.length = sizeof(*spa);
1392 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 1430 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1393 spa->range_index = 1+1; 1431 spa->range_index = 1+1;
1394 spa->address = t->spa_set_dma[1]; 1432 spa->address = t->spa_set_dma[1];
1395 spa->length = SPA1_SIZE; 1433 spa->length = SPA1_SIZE;
1434 offset += spa->header.length;
1396 1435
1397 /* spa2 (dcr0) dimm0 */ 1436 /* spa2 (dcr0) dimm0 */
1398 spa = nfit_buf + sizeof(*spa) * 2; 1437 spa = nfit_buf + offset;
1399 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1438 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1400 spa->header.length = sizeof(*spa); 1439 spa->header.length = sizeof(*spa);
1401 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 1440 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1402 spa->range_index = 2+1; 1441 spa->range_index = 2+1;
1403 spa->address = t->dcr_dma[0]; 1442 spa->address = t->dcr_dma[0];
1404 spa->length = DCR_SIZE; 1443 spa->length = DCR_SIZE;
1444 offset += spa->header.length;
1405 1445
1406 /* spa3 (dcr1) dimm1 */ 1446 /* spa3 (dcr1) dimm1 */
1407 spa = nfit_buf + sizeof(*spa) * 3; 1447 spa = nfit_buf + offset;
1408 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1448 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1409 spa->header.length = sizeof(*spa); 1449 spa->header.length = sizeof(*spa);
1410 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 1450 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1411 spa->range_index = 3+1; 1451 spa->range_index = 3+1;
1412 spa->address = t->dcr_dma[1]; 1452 spa->address = t->dcr_dma[1];
1413 spa->length = DCR_SIZE; 1453 spa->length = DCR_SIZE;
1454 offset += spa->header.length;
1414 1455
1415 /* spa4 (dcr2) dimm2 */ 1456 /* spa4 (dcr2) dimm2 */
1416 spa = nfit_buf + sizeof(*spa) * 4; 1457 spa = nfit_buf + offset;
1417 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1458 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1418 spa->header.length = sizeof(*spa); 1459 spa->header.length = sizeof(*spa);
1419 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 1460 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1420 spa->range_index = 4+1; 1461 spa->range_index = 4+1;
1421 spa->address = t->dcr_dma[2]; 1462 spa->address = t->dcr_dma[2];
1422 spa->length = DCR_SIZE; 1463 spa->length = DCR_SIZE;
1464 offset += spa->header.length;
1423 1465
1424 /* spa5 (dcr3) dimm3 */ 1466 /* spa5 (dcr3) dimm3 */
1425 spa = nfit_buf + sizeof(*spa) * 5; 1467 spa = nfit_buf + offset;
1426 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1468 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1427 spa->header.length = sizeof(*spa); 1469 spa->header.length = sizeof(*spa);
1428 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 1470 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1429 spa->range_index = 5+1; 1471 spa->range_index = 5+1;
1430 spa->address = t->dcr_dma[3]; 1472 spa->address = t->dcr_dma[3];
1431 spa->length = DCR_SIZE; 1473 spa->length = DCR_SIZE;
1474 offset += spa->header.length;
1432 1475
1433 /* spa6 (bdw for dcr0) dimm0 */ 1476 /* spa6 (bdw for dcr0) dimm0 */
1434 spa = nfit_buf + sizeof(*spa) * 6; 1477 spa = nfit_buf + offset;
1435 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1478 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1436 spa->header.length = sizeof(*spa); 1479 spa->header.length = sizeof(*spa);
1437 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 1480 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1438 spa->range_index = 6+1; 1481 spa->range_index = 6+1;
1439 spa->address = t->dimm_dma[0]; 1482 spa->address = t->dimm_dma[0];
1440 spa->length = DIMM_SIZE; 1483 spa->length = DIMM_SIZE;
1484 offset += spa->header.length;
1441 1485
1442 /* spa7 (bdw for dcr1) dimm1 */ 1486 /* spa7 (bdw for dcr1) dimm1 */
1443 spa = nfit_buf + sizeof(*spa) * 7; 1487 spa = nfit_buf + offset;
1444 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1488 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1445 spa->header.length = sizeof(*spa); 1489 spa->header.length = sizeof(*spa);
1446 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 1490 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1447 spa->range_index = 7+1; 1491 spa->range_index = 7+1;
1448 spa->address = t->dimm_dma[1]; 1492 spa->address = t->dimm_dma[1];
1449 spa->length = DIMM_SIZE; 1493 spa->length = DIMM_SIZE;
1494 offset += spa->header.length;
1450 1495
1451 /* spa8 (bdw for dcr2) dimm2 */ 1496 /* spa8 (bdw for dcr2) dimm2 */
1452 spa = nfit_buf + sizeof(*spa) * 8; 1497 spa = nfit_buf + offset;
1453 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1498 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1454 spa->header.length = sizeof(*spa); 1499 spa->header.length = sizeof(*spa);
1455 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 1500 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1456 spa->range_index = 8+1; 1501 spa->range_index = 8+1;
1457 spa->address = t->dimm_dma[2]; 1502 spa->address = t->dimm_dma[2];
1458 spa->length = DIMM_SIZE; 1503 spa->length = DIMM_SIZE;
1504 offset += spa->header.length;
1459 1505
1460 /* spa9 (bdw for dcr3) dimm3 */ 1506 /* spa9 (bdw for dcr3) dimm3 */
1461 spa = nfit_buf + sizeof(*spa) * 9; 1507 spa = nfit_buf + offset;
1462 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1508 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1463 spa->header.length = sizeof(*spa); 1509 spa->header.length = sizeof(*spa);
1464 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 1510 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1465 spa->range_index = 9+1; 1511 spa->range_index = 9+1;
1466 spa->address = t->dimm_dma[3]; 1512 spa->address = t->dimm_dma[3];
1467 spa->length = DIMM_SIZE; 1513 spa->length = DIMM_SIZE;
1514 offset += spa->header.length;
1468 1515
1469 offset = sizeof(*spa) * 10;
1470 /* mem-region0 (spa0, dimm0) */ 1516 /* mem-region0 (spa0, dimm0) */
1471 memdev = nfit_buf + offset; 1517 memdev = nfit_buf + offset;
1472 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1518 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1481,9 +1527,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1481 memdev->address = 0; 1527 memdev->address = 0;
1482 memdev->interleave_index = 0; 1528 memdev->interleave_index = 0;
1483 memdev->interleave_ways = 2; 1529 memdev->interleave_ways = 2;
1530 offset += memdev->header.length;
1484 1531
1485 /* mem-region1 (spa0, dimm1) */ 1532 /* mem-region1 (spa0, dimm1) */
1486 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map); 1533 memdev = nfit_buf + offset;
1487 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1534 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1488 memdev->header.length = sizeof(*memdev); 1535 memdev->header.length = sizeof(*memdev);
1489 memdev->device_handle = handle[1]; 1536 memdev->device_handle = handle[1];
@@ -1497,9 +1544,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1497 memdev->interleave_index = 0; 1544 memdev->interleave_index = 0;
1498 memdev->interleave_ways = 2; 1545 memdev->interleave_ways = 2;
1499 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED; 1546 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1547 offset += memdev->header.length;
1500 1548
1501 /* mem-region2 (spa1, dimm0) */ 1549 /* mem-region2 (spa1, dimm0) */
1502 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2; 1550 memdev = nfit_buf + offset;
1503 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1551 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1504 memdev->header.length = sizeof(*memdev); 1552 memdev->header.length = sizeof(*memdev);
1505 memdev->device_handle = handle[0]; 1553 memdev->device_handle = handle[0];
@@ -1513,9 +1561,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1513 memdev->interleave_index = 0; 1561 memdev->interleave_index = 0;
1514 memdev->interleave_ways = 4; 1562 memdev->interleave_ways = 4;
1515 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED; 1563 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1564 offset += memdev->header.length;
1516 1565
1517 /* mem-region3 (spa1, dimm1) */ 1566 /* mem-region3 (spa1, dimm1) */
1518 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3; 1567 memdev = nfit_buf + offset;
1519 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1568 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1520 memdev->header.length = sizeof(*memdev); 1569 memdev->header.length = sizeof(*memdev);
1521 memdev->device_handle = handle[1]; 1570 memdev->device_handle = handle[1];
@@ -1528,9 +1577,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1528 memdev->address = SPA0_SIZE/2; 1577 memdev->address = SPA0_SIZE/2;
1529 memdev->interleave_index = 0; 1578 memdev->interleave_index = 0;
1530 memdev->interleave_ways = 4; 1579 memdev->interleave_ways = 4;
1580 offset += memdev->header.length;
1531 1581
1532 /* mem-region4 (spa1, dimm2) */ 1582 /* mem-region4 (spa1, dimm2) */
1533 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4; 1583 memdev = nfit_buf + offset;
1534 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1584 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1535 memdev->header.length = sizeof(*memdev); 1585 memdev->header.length = sizeof(*memdev);
1536 memdev->device_handle = handle[2]; 1586 memdev->device_handle = handle[2];
@@ -1544,9 +1594,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1544 memdev->interleave_index = 0; 1594 memdev->interleave_index = 0;
1545 memdev->interleave_ways = 4; 1595 memdev->interleave_ways = 4;
1546 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED; 1596 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1597 offset += memdev->header.length;
1547 1598
1548 /* mem-region5 (spa1, dimm3) */ 1599 /* mem-region5 (spa1, dimm3) */
1549 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5; 1600 memdev = nfit_buf + offset;
1550 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1601 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1551 memdev->header.length = sizeof(*memdev); 1602 memdev->header.length = sizeof(*memdev);
1552 memdev->device_handle = handle[3]; 1603 memdev->device_handle = handle[3];
@@ -1559,9 +1610,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1559 memdev->address = SPA0_SIZE/2; 1610 memdev->address = SPA0_SIZE/2;
1560 memdev->interleave_index = 0; 1611 memdev->interleave_index = 0;
1561 memdev->interleave_ways = 4; 1612 memdev->interleave_ways = 4;
1613 offset += memdev->header.length;
1562 1614
1563 /* mem-region6 (spa/dcr0, dimm0) */ 1615 /* mem-region6 (spa/dcr0, dimm0) */
1564 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6; 1616 memdev = nfit_buf + offset;
1565 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1617 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1566 memdev->header.length = sizeof(*memdev); 1618 memdev->header.length = sizeof(*memdev);
1567 memdev->device_handle = handle[0]; 1619 memdev->device_handle = handle[0];
@@ -1574,9 +1626,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1574 memdev->address = 0; 1626 memdev->address = 0;
1575 memdev->interleave_index = 0; 1627 memdev->interleave_index = 0;
1576 memdev->interleave_ways = 1; 1628 memdev->interleave_ways = 1;
1629 offset += memdev->header.length;
1577 1630
1578 /* mem-region7 (spa/dcr1, dimm1) */ 1631 /* mem-region7 (spa/dcr1, dimm1) */
1579 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7; 1632 memdev = nfit_buf + offset;
1580 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1633 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1581 memdev->header.length = sizeof(*memdev); 1634 memdev->header.length = sizeof(*memdev);
1582 memdev->device_handle = handle[1]; 1635 memdev->device_handle = handle[1];
@@ -1589,9 +1642,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1589 memdev->address = 0; 1642 memdev->address = 0;
1590 memdev->interleave_index = 0; 1643 memdev->interleave_index = 0;
1591 memdev->interleave_ways = 1; 1644 memdev->interleave_ways = 1;
1645 offset += memdev->header.length;
1592 1646
1593 /* mem-region8 (spa/dcr2, dimm2) */ 1647 /* mem-region8 (spa/dcr2, dimm2) */
1594 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8; 1648 memdev = nfit_buf + offset;
1595 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1649 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1596 memdev->header.length = sizeof(*memdev); 1650 memdev->header.length = sizeof(*memdev);
1597 memdev->device_handle = handle[2]; 1651 memdev->device_handle = handle[2];
@@ -1604,9 +1658,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1604 memdev->address = 0; 1658 memdev->address = 0;
1605 memdev->interleave_index = 0; 1659 memdev->interleave_index = 0;
1606 memdev->interleave_ways = 1; 1660 memdev->interleave_ways = 1;
1661 offset += memdev->header.length;
1607 1662
1608 /* mem-region9 (spa/dcr3, dimm3) */ 1663 /* mem-region9 (spa/dcr3, dimm3) */
1609 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9; 1664 memdev = nfit_buf + offset;
1610 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1665 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1611 memdev->header.length = sizeof(*memdev); 1666 memdev->header.length = sizeof(*memdev);
1612 memdev->device_handle = handle[3]; 1667 memdev->device_handle = handle[3];
@@ -1619,9 +1674,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1619 memdev->address = 0; 1674 memdev->address = 0;
1620 memdev->interleave_index = 0; 1675 memdev->interleave_index = 0;
1621 memdev->interleave_ways = 1; 1676 memdev->interleave_ways = 1;
1677 offset += memdev->header.length;
1622 1678
1623 /* mem-region10 (spa/bdw0, dimm0) */ 1679 /* mem-region10 (spa/bdw0, dimm0) */
1624 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10; 1680 memdev = nfit_buf + offset;
1625 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1681 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1626 memdev->header.length = sizeof(*memdev); 1682 memdev->header.length = sizeof(*memdev);
1627 memdev->device_handle = handle[0]; 1683 memdev->device_handle = handle[0];
@@ -1634,9 +1690,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1634 memdev->address = 0; 1690 memdev->address = 0;
1635 memdev->interleave_index = 0; 1691 memdev->interleave_index = 0;
1636 memdev->interleave_ways = 1; 1692 memdev->interleave_ways = 1;
1693 offset += memdev->header.length;
1637 1694
1638 /* mem-region11 (spa/bdw1, dimm1) */ 1695 /* mem-region11 (spa/bdw1, dimm1) */
1639 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11; 1696 memdev = nfit_buf + offset;
1640 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1697 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1641 memdev->header.length = sizeof(*memdev); 1698 memdev->header.length = sizeof(*memdev);
1642 memdev->device_handle = handle[1]; 1699 memdev->device_handle = handle[1];
@@ -1649,9 +1706,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1649 memdev->address = 0; 1706 memdev->address = 0;
1650 memdev->interleave_index = 0; 1707 memdev->interleave_index = 0;
1651 memdev->interleave_ways = 1; 1708 memdev->interleave_ways = 1;
1709 offset += memdev->header.length;
1652 1710
1653 /* mem-region12 (spa/bdw2, dimm2) */ 1711 /* mem-region12 (spa/bdw2, dimm2) */
1654 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12; 1712 memdev = nfit_buf + offset;
1655 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1713 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1656 memdev->header.length = sizeof(*memdev); 1714 memdev->header.length = sizeof(*memdev);
1657 memdev->device_handle = handle[2]; 1715 memdev->device_handle = handle[2];
@@ -1664,9 +1722,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1664 memdev->address = 0; 1722 memdev->address = 0;
1665 memdev->interleave_index = 0; 1723 memdev->interleave_index = 0;
1666 memdev->interleave_ways = 1; 1724 memdev->interleave_ways = 1;
1725 offset += memdev->header.length;
1667 1726
1668 /* mem-region13 (spa/dcr3, dimm3) */ 1727 /* mem-region13 (spa/dcr3, dimm3) */
1669 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13; 1728 memdev = nfit_buf + offset;
1670 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 1729 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1671 memdev->header.length = sizeof(*memdev); 1730 memdev->header.length = sizeof(*memdev);
1672 memdev->device_handle = handle[3]; 1731 memdev->device_handle = handle[3];
@@ -1680,12 +1739,12 @@ static void nfit_test0_setup(struct nfit_test *t)
1680 memdev->interleave_index = 0; 1739 memdev->interleave_index = 0;
1681 memdev->interleave_ways = 1; 1740 memdev->interleave_ways = 1;
1682 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED; 1741 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1742 offset += memdev->header.length;
1683 1743
1684 offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
1685 /* dcr-descriptor0: blk */ 1744 /* dcr-descriptor0: blk */
1686 dcr = nfit_buf + offset; 1745 dcr = nfit_buf + offset;
1687 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1746 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1688 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1747 dcr->header.length = sizeof(*dcr);
1689 dcr->region_index = 0+1; 1748 dcr->region_index = 0+1;
1690 dcr_common_init(dcr); 1749 dcr_common_init(dcr);
1691 dcr->serial_number = ~handle[0]; 1750 dcr->serial_number = ~handle[0];
@@ -1696,11 +1755,12 @@ static void nfit_test0_setup(struct nfit_test *t)
1696 dcr->command_size = 8; 1755 dcr->command_size = 8;
1697 dcr->status_offset = 8; 1756 dcr->status_offset = 8;
1698 dcr->status_size = 4; 1757 dcr->status_size = 4;
1758 offset += dcr->header.length;
1699 1759
1700 /* dcr-descriptor1: blk */ 1760 /* dcr-descriptor1: blk */
1701 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region); 1761 dcr = nfit_buf + offset;
1702 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1762 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1703 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1763 dcr->header.length = sizeof(*dcr);
1704 dcr->region_index = 1+1; 1764 dcr->region_index = 1+1;
1705 dcr_common_init(dcr); 1765 dcr_common_init(dcr);
1706 dcr->serial_number = ~handle[1]; 1766 dcr->serial_number = ~handle[1];
@@ -1711,11 +1771,12 @@ static void nfit_test0_setup(struct nfit_test *t)
1711 dcr->command_size = 8; 1771 dcr->command_size = 8;
1712 dcr->status_offset = 8; 1772 dcr->status_offset = 8;
1713 dcr->status_size = 4; 1773 dcr->status_size = 4;
1774 offset += dcr->header.length;
1714 1775
1715 /* dcr-descriptor2: blk */ 1776 /* dcr-descriptor2: blk */
1716 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2; 1777 dcr = nfit_buf + offset;
1717 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1778 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1718 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1779 dcr->header.length = sizeof(*dcr);
1719 dcr->region_index = 2+1; 1780 dcr->region_index = 2+1;
1720 dcr_common_init(dcr); 1781 dcr_common_init(dcr);
1721 dcr->serial_number = ~handle[2]; 1782 dcr->serial_number = ~handle[2];
@@ -1726,11 +1787,12 @@ static void nfit_test0_setup(struct nfit_test *t)
1726 dcr->command_size = 8; 1787 dcr->command_size = 8;
1727 dcr->status_offset = 8; 1788 dcr->status_offset = 8;
1728 dcr->status_size = 4; 1789 dcr->status_size = 4;
1790 offset += dcr->header.length;
1729 1791
1730 /* dcr-descriptor3: blk */ 1792 /* dcr-descriptor3: blk */
1731 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3; 1793 dcr = nfit_buf + offset;
1732 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1794 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1733 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1795 dcr->header.length = sizeof(*dcr);
1734 dcr->region_index = 3+1; 1796 dcr->region_index = 3+1;
1735 dcr_common_init(dcr); 1797 dcr_common_init(dcr);
1736 dcr->serial_number = ~handle[3]; 1798 dcr->serial_number = ~handle[3];
@@ -1741,8 +1803,8 @@ static void nfit_test0_setup(struct nfit_test *t)
1741 dcr->command_size = 8; 1803 dcr->command_size = 8;
1742 dcr->status_offset = 8; 1804 dcr->status_offset = 8;
1743 dcr->status_size = 4; 1805 dcr->status_size = 4;
1806 offset += dcr->header.length;
1744 1807
1745 offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
1746 /* dcr-descriptor0: pmem */ 1808 /* dcr-descriptor0: pmem */
1747 dcr = nfit_buf + offset; 1809 dcr = nfit_buf + offset;
1748 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1810 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -1753,10 +1815,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1753 dcr->serial_number = ~handle[0]; 1815 dcr->serial_number = ~handle[0];
1754 dcr->code = NFIT_FIC_BYTEN; 1816 dcr->code = NFIT_FIC_BYTEN;
1755 dcr->windows = 0; 1817 dcr->windows = 0;
1818 offset += dcr->header.length;
1756 1819
1757 /* dcr-descriptor1: pmem */ 1820 /* dcr-descriptor1: pmem */
1758 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region, 1821 dcr = nfit_buf + offset;
1759 window_size);
1760 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1822 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1761 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1823 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1762 window_size); 1824 window_size);
@@ -1765,10 +1827,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1765 dcr->serial_number = ~handle[1]; 1827 dcr->serial_number = ~handle[1];
1766 dcr->code = NFIT_FIC_BYTEN; 1828 dcr->code = NFIT_FIC_BYTEN;
1767 dcr->windows = 0; 1829 dcr->windows = 0;
1830 offset += dcr->header.length;
1768 1831
1769 /* dcr-descriptor2: pmem */ 1832 /* dcr-descriptor2: pmem */
1770 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region, 1833 dcr = nfit_buf + offset;
1771 window_size) * 2;
1772 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1834 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1773 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1835 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1774 window_size); 1836 window_size);
@@ -1777,10 +1839,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1777 dcr->serial_number = ~handle[2]; 1839 dcr->serial_number = ~handle[2];
1778 dcr->code = NFIT_FIC_BYTEN; 1840 dcr->code = NFIT_FIC_BYTEN;
1779 dcr->windows = 0; 1841 dcr->windows = 0;
1842 offset += dcr->header.length;
1780 1843
1781 /* dcr-descriptor3: pmem */ 1844 /* dcr-descriptor3: pmem */
1782 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region, 1845 dcr = nfit_buf + offset;
1783 window_size) * 3;
1784 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1846 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1785 dcr->header.length = offsetof(struct acpi_nfit_control_region, 1847 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1786 window_size); 1848 window_size);
@@ -1789,54 +1851,56 @@ static void nfit_test0_setup(struct nfit_test *t)
1789 dcr->serial_number = ~handle[3]; 1851 dcr->serial_number = ~handle[3];
1790 dcr->code = NFIT_FIC_BYTEN; 1852 dcr->code = NFIT_FIC_BYTEN;
1791 dcr->windows = 0; 1853 dcr->windows = 0;
1854 offset += dcr->header.length;
1792 1855
1793 offset = offset + offsetof(struct acpi_nfit_control_region,
1794 window_size) * 4;
1795 /* bdw0 (spa/dcr0, dimm0) */ 1856 /* bdw0 (spa/dcr0, dimm0) */
1796 bdw = nfit_buf + offset; 1857 bdw = nfit_buf + offset;
1797 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1858 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1798 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1859 bdw->header.length = sizeof(*bdw);
1799 bdw->region_index = 0+1; 1860 bdw->region_index = 0+1;
1800 bdw->windows = 1; 1861 bdw->windows = 1;
1801 bdw->offset = 0; 1862 bdw->offset = 0;
1802 bdw->size = BDW_SIZE; 1863 bdw->size = BDW_SIZE;
1803 bdw->capacity = DIMM_SIZE; 1864 bdw->capacity = DIMM_SIZE;
1804 bdw->start_address = 0; 1865 bdw->start_address = 0;
1866 offset += bdw->header.length;
1805 1867
1806 /* bdw1 (spa/dcr1, dimm1) */ 1868 /* bdw1 (spa/dcr1, dimm1) */
1807 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region); 1869 bdw = nfit_buf + offset;
1808 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1870 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1809 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1871 bdw->header.length = sizeof(*bdw);
1810 bdw->region_index = 1+1; 1872 bdw->region_index = 1+1;
1811 bdw->windows = 1; 1873 bdw->windows = 1;
1812 bdw->offset = 0; 1874 bdw->offset = 0;
1813 bdw->size = BDW_SIZE; 1875 bdw->size = BDW_SIZE;
1814 bdw->capacity = DIMM_SIZE; 1876 bdw->capacity = DIMM_SIZE;
1815 bdw->start_address = 0; 1877 bdw->start_address = 0;
1878 offset += bdw->header.length;
1816 1879
1817 /* bdw2 (spa/dcr2, dimm2) */ 1880 /* bdw2 (spa/dcr2, dimm2) */
1818 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2; 1881 bdw = nfit_buf + offset;
1819 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1882 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1820 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1883 bdw->header.length = sizeof(*bdw);
1821 bdw->region_index = 2+1; 1884 bdw->region_index = 2+1;
1822 bdw->windows = 1; 1885 bdw->windows = 1;
1823 bdw->offset = 0; 1886 bdw->offset = 0;
1824 bdw->size = BDW_SIZE; 1887 bdw->size = BDW_SIZE;
1825 bdw->capacity = DIMM_SIZE; 1888 bdw->capacity = DIMM_SIZE;
1826 bdw->start_address = 0; 1889 bdw->start_address = 0;
1890 offset += bdw->header.length;
1827 1891
1828 /* bdw3 (spa/dcr3, dimm3) */ 1892 /* bdw3 (spa/dcr3, dimm3) */
1829 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3; 1893 bdw = nfit_buf + offset;
1830 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1894 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1831 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1895 bdw->header.length = sizeof(*bdw);
1832 bdw->region_index = 3+1; 1896 bdw->region_index = 3+1;
1833 bdw->windows = 1; 1897 bdw->windows = 1;
1834 bdw->offset = 0; 1898 bdw->offset = 0;
1835 bdw->size = BDW_SIZE; 1899 bdw->size = BDW_SIZE;
1836 bdw->capacity = DIMM_SIZE; 1900 bdw->capacity = DIMM_SIZE;
1837 bdw->start_address = 0; 1901 bdw->start_address = 0;
1902 offset += bdw->header.length;
1838 1903
1839 offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1840 /* flush0 (dimm0) */ 1904 /* flush0 (dimm0) */
1841 flush = nfit_buf + offset; 1905 flush = nfit_buf + offset;
1842 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1906 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
@@ -1845,48 +1909,52 @@ static void nfit_test0_setup(struct nfit_test *t)
1845 flush->hint_count = NUM_HINTS; 1909 flush->hint_count = NUM_HINTS;
1846 for (i = 0; i < NUM_HINTS; i++) 1910 for (i = 0; i < NUM_HINTS; i++)
1847 flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64); 1911 flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
1912 offset += flush->header.length;
1848 1913
1849 /* flush1 (dimm1) */ 1914 /* flush1 (dimm1) */
1850 flush = nfit_buf + offset + flush_hint_size * 1; 1915 flush = nfit_buf + offset;
1851 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1916 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1852 flush->header.length = flush_hint_size; 1917 flush->header.length = flush_hint_size;
1853 flush->device_handle = handle[1]; 1918 flush->device_handle = handle[1];
1854 flush->hint_count = NUM_HINTS; 1919 flush->hint_count = NUM_HINTS;
1855 for (i = 0; i < NUM_HINTS; i++) 1920 for (i = 0; i < NUM_HINTS; i++)
1856 flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64); 1921 flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
1922 offset += flush->header.length;
1857 1923
1858 /* flush2 (dimm2) */ 1924 /* flush2 (dimm2) */
1859 flush = nfit_buf + offset + flush_hint_size * 2; 1925 flush = nfit_buf + offset;
1860 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1926 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1861 flush->header.length = flush_hint_size; 1927 flush->header.length = flush_hint_size;
1862 flush->device_handle = handle[2]; 1928 flush->device_handle = handle[2];
1863 flush->hint_count = NUM_HINTS; 1929 flush->hint_count = NUM_HINTS;
1864 for (i = 0; i < NUM_HINTS; i++) 1930 for (i = 0; i < NUM_HINTS; i++)
1865 flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64); 1931 flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
1932 offset += flush->header.length;
1866 1933
1867 /* flush3 (dimm3) */ 1934 /* flush3 (dimm3) */
1868 flush = nfit_buf + offset + flush_hint_size * 3; 1935 flush = nfit_buf + offset;
1869 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 1936 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1870 flush->header.length = flush_hint_size; 1937 flush->header.length = flush_hint_size;
1871 flush->device_handle = handle[3]; 1938 flush->device_handle = handle[3];
1872 flush->hint_count = NUM_HINTS; 1939 flush->hint_count = NUM_HINTS;
1873 for (i = 0; i < NUM_HINTS; i++) 1940 for (i = 0; i < NUM_HINTS; i++)
1874 flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64); 1941 flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
1942 offset += flush->header.length;
1875 1943
1876 /* platform capabilities */ 1944 /* platform capabilities */
1877 pcap = nfit_buf + offset + flush_hint_size * 4; 1945 pcap = nfit_buf + offset;
1878 pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES; 1946 pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
1879 pcap->header.length = sizeof(*pcap); 1947 pcap->header.length = sizeof(*pcap);
1880 pcap->highest_capability = 1; 1948 pcap->highest_capability = 1;
1881 pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH | 1949 pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
1882 ACPI_NFIT_CAPABILITY_MEM_FLUSH; 1950 ACPI_NFIT_CAPABILITY_MEM_FLUSH;
1951 offset += pcap->header.length;
1883 1952
1884 if (t->setup_hotplug) { 1953 if (t->setup_hotplug) {
1885 offset = offset + flush_hint_size * 4 + sizeof(*pcap);
1886 /* dcr-descriptor4: blk */ 1954 /* dcr-descriptor4: blk */
1887 dcr = nfit_buf + offset; 1955 dcr = nfit_buf + offset;
1888 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1956 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1889 dcr->header.length = sizeof(struct acpi_nfit_control_region); 1957 dcr->header.length = sizeof(*dcr);
1890 dcr->region_index = 8+1; 1958 dcr->region_index = 8+1;
1891 dcr_common_init(dcr); 1959 dcr_common_init(dcr);
1892 dcr->serial_number = ~handle[4]; 1960 dcr->serial_number = ~handle[4];
@@ -1897,8 +1965,8 @@ static void nfit_test0_setup(struct nfit_test *t)
1897 dcr->command_size = 8; 1965 dcr->command_size = 8;
1898 dcr->status_offset = 8; 1966 dcr->status_offset = 8;
1899 dcr->status_size = 4; 1967 dcr->status_size = 4;
1968 offset += dcr->header.length;
1900 1969
1901 offset = offset + sizeof(struct acpi_nfit_control_region);
1902 /* dcr-descriptor4: pmem */ 1970 /* dcr-descriptor4: pmem */
1903 dcr = nfit_buf + offset; 1971 dcr = nfit_buf + offset;
1904 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 1972 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -1909,21 +1977,20 @@ static void nfit_test0_setup(struct nfit_test *t)
1909 dcr->serial_number = ~handle[4]; 1977 dcr->serial_number = ~handle[4];
1910 dcr->code = NFIT_FIC_BYTEN; 1978 dcr->code = NFIT_FIC_BYTEN;
1911 dcr->windows = 0; 1979 dcr->windows = 0;
1980 offset += dcr->header.length;
1912 1981
1913 offset = offset + offsetof(struct acpi_nfit_control_region,
1914 window_size);
1915 /* bdw4 (spa/dcr4, dimm4) */ 1982 /* bdw4 (spa/dcr4, dimm4) */
1916 bdw = nfit_buf + offset; 1983 bdw = nfit_buf + offset;
1917 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 1984 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1918 bdw->header.length = sizeof(struct acpi_nfit_data_region); 1985 bdw->header.length = sizeof(*bdw);
1919 bdw->region_index = 8+1; 1986 bdw->region_index = 8+1;
1920 bdw->windows = 1; 1987 bdw->windows = 1;
1921 bdw->offset = 0; 1988 bdw->offset = 0;
1922 bdw->size = BDW_SIZE; 1989 bdw->size = BDW_SIZE;
1923 bdw->capacity = DIMM_SIZE; 1990 bdw->capacity = DIMM_SIZE;
1924 bdw->start_address = 0; 1991 bdw->start_address = 0;
1992 offset += bdw->header.length;
1925 1993
1926 offset = offset + sizeof(struct acpi_nfit_data_region);
1927 /* spa10 (dcr4) dimm4 */ 1994 /* spa10 (dcr4) dimm4 */
1928 spa = nfit_buf + offset; 1995 spa = nfit_buf + offset;
1929 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 1996 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
@@ -1932,30 +1999,32 @@ static void nfit_test0_setup(struct nfit_test *t)
1932 spa->range_index = 10+1; 1999 spa->range_index = 10+1;
1933 spa->address = t->dcr_dma[4]; 2000 spa->address = t->dcr_dma[4];
1934 spa->length = DCR_SIZE; 2001 spa->length = DCR_SIZE;
2002 offset += spa->header.length;
1935 2003
1936 /* 2004 /*
1937 * spa11 (single-dimm interleave for hotplug, note storage 2005 * spa11 (single-dimm interleave for hotplug, note storage
1938 * does not actually alias the related block-data-window 2006 * does not actually alias the related block-data-window
1939 * regions) 2007 * regions)
1940 */ 2008 */
1941 spa = nfit_buf + offset + sizeof(*spa); 2009 spa = nfit_buf + offset;
1942 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 2010 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1943 spa->header.length = sizeof(*spa); 2011 spa->header.length = sizeof(*spa);
1944 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 2012 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1945 spa->range_index = 11+1; 2013 spa->range_index = 11+1;
1946 spa->address = t->spa_set_dma[2]; 2014 spa->address = t->spa_set_dma[2];
1947 spa->length = SPA0_SIZE; 2015 spa->length = SPA0_SIZE;
2016 offset += spa->header.length;
1948 2017
1949 /* spa12 (bdw for dcr4) dimm4 */ 2018 /* spa12 (bdw for dcr4) dimm4 */
1950 spa = nfit_buf + offset + sizeof(*spa) * 2; 2019 spa = nfit_buf + offset;
1951 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 2020 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1952 spa->header.length = sizeof(*spa); 2021 spa->header.length = sizeof(*spa);
1953 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 2022 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1954 spa->range_index = 12+1; 2023 spa->range_index = 12+1;
1955 spa->address = t->dimm_dma[4]; 2024 spa->address = t->dimm_dma[4];
1956 spa->length = DIMM_SIZE; 2025 spa->length = DIMM_SIZE;
2026 offset += spa->header.length;
1957 2027
1958 offset = offset + sizeof(*spa) * 3;
1959 /* mem-region14 (spa/dcr4, dimm4) */ 2028 /* mem-region14 (spa/dcr4, dimm4) */
1960 memdev = nfit_buf + offset; 2029 memdev = nfit_buf + offset;
1961 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 2030 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1970,10 +2039,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1970 memdev->address = 0; 2039 memdev->address = 0;
1971 memdev->interleave_index = 0; 2040 memdev->interleave_index = 0;
1972 memdev->interleave_ways = 1; 2041 memdev->interleave_ways = 1;
2042 offset += memdev->header.length;
1973 2043
1974 /* mem-region15 (spa0, dimm4) */ 2044 /* mem-region15 (spa11, dimm4) */
1975 memdev = nfit_buf + offset + 2045 memdev = nfit_buf + offset;
1976 sizeof(struct acpi_nfit_memory_map);
1977 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 2046 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1978 memdev->header.length = sizeof(*memdev); 2047 memdev->header.length = sizeof(*memdev);
1979 memdev->device_handle = handle[4]; 2048 memdev->device_handle = handle[4];
@@ -1987,10 +2056,10 @@ static void nfit_test0_setup(struct nfit_test *t)
1987 memdev->interleave_index = 0; 2056 memdev->interleave_index = 0;
1988 memdev->interleave_ways = 1; 2057 memdev->interleave_ways = 1;
1989 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED; 2058 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
2059 offset += memdev->header.length;
1990 2060
1991 /* mem-region16 (spa/bdw4, dimm4) */ 2061 /* mem-region16 (spa/bdw4, dimm4) */
1992 memdev = nfit_buf + offset + 2062 memdev = nfit_buf + offset;
1993 sizeof(struct acpi_nfit_memory_map) * 2;
1994 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 2063 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1995 memdev->header.length = sizeof(*memdev); 2064 memdev->header.length = sizeof(*memdev);
1996 memdev->device_handle = handle[4]; 2065 memdev->device_handle = handle[4];
@@ -2003,8 +2072,8 @@ static void nfit_test0_setup(struct nfit_test *t)
2003 memdev->address = 0; 2072 memdev->address = 0;
2004 memdev->interleave_index = 0; 2073 memdev->interleave_index = 0;
2005 memdev->interleave_ways = 1; 2074 memdev->interleave_ways = 1;
2075 offset += memdev->header.length;
2006 2076
2007 offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
2008 /* flush3 (dimm4) */ 2077 /* flush3 (dimm4) */
2009 flush = nfit_buf + offset; 2078 flush = nfit_buf + offset;
2010 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS; 2079 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
@@ -2014,8 +2083,14 @@ static void nfit_test0_setup(struct nfit_test *t)
2014 for (i = 0; i < NUM_HINTS; i++) 2083 for (i = 0; i < NUM_HINTS; i++)
2015 flush->hint_address[i] = t->flush_dma[4] 2084 flush->hint_address[i] = t->flush_dma[4]
2016 + i * sizeof(u64); 2085 + i * sizeof(u64);
2086 offset += flush->header.length;
2087
2088 /* sanity check to make sure we've filled the buffer */
2089 WARN_ON(offset != t->nfit_size);
2017 } 2090 }
2018 2091
2092 t->nfit_filled = offset;
2093
2019 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0], 2094 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
2020 SPA0_SIZE); 2095 SPA0_SIZE);
2021 2096
@@ -2026,6 +2101,7 @@ static void nfit_test0_setup(struct nfit_test *t)
2026 set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en); 2101 set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
2027 set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en); 2102 set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
2028 set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en); 2103 set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
2104 set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
2029 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en); 2105 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
2030 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en); 2106 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
2031 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); 2107 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
@@ -2061,17 +2137,18 @@ static void nfit_test1_setup(struct nfit_test *t)
2061 spa->range_index = 0+1; 2137 spa->range_index = 0+1;
2062 spa->address = t->spa_set_dma[0]; 2138 spa->address = t->spa_set_dma[0];
2063 spa->length = SPA2_SIZE; 2139 spa->length = SPA2_SIZE;
2140 offset += spa->header.length;
2064 2141
2065 /* virtual cd region */ 2142 /* virtual cd region */
2066 spa = nfit_buf + sizeof(*spa); 2143 spa = nfit_buf + offset;
2067 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 2144 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2068 spa->header.length = sizeof(*spa); 2145 spa->header.length = sizeof(*spa);
2069 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16); 2146 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
2070 spa->range_index = 0; 2147 spa->range_index = 0;
2071 spa->address = t->spa_set_dma[1]; 2148 spa->address = t->spa_set_dma[1];
2072 spa->length = SPA_VCD_SIZE; 2149 spa->length = SPA_VCD_SIZE;
2150 offset += spa->header.length;
2073 2151
2074 offset += sizeof(*spa) * 2;
2075 /* mem-region0 (spa0, dimm0) */ 2152 /* mem-region0 (spa0, dimm0) */
2076 memdev = nfit_buf + offset; 2153 memdev = nfit_buf + offset;
2077 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 2154 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -2089,8 +2166,8 @@ static void nfit_test1_setup(struct nfit_test *t)
2089 memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED 2166 memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
2090 | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED 2167 | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
2091 | ACPI_NFIT_MEM_NOT_ARMED; 2168 | ACPI_NFIT_MEM_NOT_ARMED;
2169 offset += memdev->header.length;
2092 2170
2093 offset += sizeof(*memdev);
2094 /* dcr-descriptor0 */ 2171 /* dcr-descriptor0 */
2095 dcr = nfit_buf + offset; 2172 dcr = nfit_buf + offset;
2096 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 2173 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -2101,8 +2178,8 @@ static void nfit_test1_setup(struct nfit_test *t)
2101 dcr->serial_number = ~handle[5]; 2178 dcr->serial_number = ~handle[5];
2102 dcr->code = NFIT_FIC_BYTE; 2179 dcr->code = NFIT_FIC_BYTE;
2103 dcr->windows = 0; 2180 dcr->windows = 0;
2104
2105 offset += dcr->header.length; 2181 offset += dcr->header.length;
2182
2106 memdev = nfit_buf + offset; 2183 memdev = nfit_buf + offset;
2107 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 2184 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2108 memdev->header.length = sizeof(*memdev); 2185 memdev->header.length = sizeof(*memdev);
@@ -2117,9 +2194,9 @@ static void nfit_test1_setup(struct nfit_test *t)
2117 memdev->interleave_index = 0; 2194 memdev->interleave_index = 0;
2118 memdev->interleave_ways = 1; 2195 memdev->interleave_ways = 1;
2119 memdev->flags = ACPI_NFIT_MEM_MAP_FAILED; 2196 memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
2197 offset += memdev->header.length;
2120 2198
2121 /* dcr-descriptor1 */ 2199 /* dcr-descriptor1 */
2122 offset += sizeof(*memdev);
2123 dcr = nfit_buf + offset; 2200 dcr = nfit_buf + offset;
2124 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 2201 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2125 dcr->header.length = offsetof(struct acpi_nfit_control_region, 2202 dcr->header.length = offsetof(struct acpi_nfit_control_region,
@@ -2129,6 +2206,12 @@ static void nfit_test1_setup(struct nfit_test *t)
2129 dcr->serial_number = ~handle[6]; 2206 dcr->serial_number = ~handle[6];
2130 dcr->code = NFIT_FIC_BYTE; 2207 dcr->code = NFIT_FIC_BYTE;
2131 dcr->windows = 0; 2208 dcr->windows = 0;
2209 offset += dcr->header.length;
2210
2211 /* sanity check to make sure we've filled the buffer */
2212 WARN_ON(offset != t->nfit_size);
2213
2214 t->nfit_filled = offset;
2132 2215
2133 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0], 2216 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
2134 SPA2_SIZE); 2217 SPA2_SIZE);
@@ -2487,7 +2570,7 @@ static int nfit_test_probe(struct platform_device *pdev)
2487 nd_desc->ndctl = nfit_test_ctl; 2570 nd_desc->ndctl = nfit_test_ctl;
2488 2571
2489 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf, 2572 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
2490 nfit_test->nfit_size); 2573 nfit_test->nfit_filled);
2491 if (rc) 2574 if (rc)
2492 return rc; 2575 return rc;
2493 2576
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 428344519cdf..33752e06ff8d 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -93,6 +93,7 @@ struct nd_cmd_ars_err_inj_stat {
93#define ND_INTEL_FW_FINISH_UPDATE 15 93#define ND_INTEL_FW_FINISH_UPDATE 15
94#define ND_INTEL_FW_FINISH_QUERY 16 94#define ND_INTEL_FW_FINISH_QUERY 16
95#define ND_INTEL_SMART_SET_THRESHOLD 17 95#define ND_INTEL_SMART_SET_THRESHOLD 17
96#define ND_INTEL_SMART_INJECT 18
96 97
97#define ND_INTEL_SMART_HEALTH_VALID (1 << 0) 98#define ND_INTEL_SMART_HEALTH_VALID (1 << 0)
98#define ND_INTEL_SMART_SPARES_VALID (1 << 1) 99#define ND_INTEL_SMART_SPARES_VALID (1 << 1)
@@ -111,6 +112,10 @@ struct nd_cmd_ars_err_inj_stat {
111#define ND_INTEL_SMART_NON_CRITICAL_HEALTH (1 << 0) 112#define ND_INTEL_SMART_NON_CRITICAL_HEALTH (1 << 0)
112#define ND_INTEL_SMART_CRITICAL_HEALTH (1 << 1) 113#define ND_INTEL_SMART_CRITICAL_HEALTH (1 << 1)
113#define ND_INTEL_SMART_FATAL_HEALTH (1 << 2) 114#define ND_INTEL_SMART_FATAL_HEALTH (1 << 2)
115#define ND_INTEL_SMART_INJECT_MTEMP (1 << 0)
116#define ND_INTEL_SMART_INJECT_SPARE (1 << 1)
117#define ND_INTEL_SMART_INJECT_FATAL (1 << 2)
118#define ND_INTEL_SMART_INJECT_SHUTDOWN (1 << 3)
114 119
115struct nd_intel_smart { 120struct nd_intel_smart {
116 __u32 status; 121 __u32 status;
@@ -158,6 +163,17 @@ struct nd_intel_smart_set_threshold {
158 __u32 status; 163 __u32 status;
159} __packed; 164} __packed;
160 165
166struct nd_intel_smart_inject {
167 __u64 flags;
168 __u8 mtemp_enable;
169 __u16 media_temperature;
170 __u8 spare_enable;
171 __u8 spares;
172 __u8 fatal_enable;
173 __u8 unsafe_shutdown_enable;
174 __u32 status;
175} __packed;
176
161#define INTEL_FW_STORAGE_SIZE 0x100000 177#define INTEL_FW_STORAGE_SIZE 0x100000
162#define INTEL_FW_MAX_SEND_LEN 0xFFEC 178#define INTEL_FW_MAX_SEND_LEN 0xFFEC
163#define INTEL_FW_QUERY_INTERVAL 250000 179#define INTEL_FW_QUERY_INTERVAL 250000