aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--MAINTAINERS17
-rw-r--r--drivers/acpi/nfit/core.c185
-rw-r--r--drivers/acpi/nfit/nfit.h17
-rw-r--r--drivers/dax/super.c38
-rw-r--r--drivers/nvdimm/btt.c33
-rw-r--r--drivers/nvdimm/btt.h2
-rw-r--r--drivers/nvdimm/btt_devs.c8
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/label.c26
-rw-r--r--drivers/nvdimm/namespace_devs.c18
-rw-r--r--drivers/nvdimm/of_pmem.c1
-rw-r--r--drivers/nvdimm/pfn_devs.c24
-rw-r--r--drivers/nvdimm/region_devs.c7
-rw-r--r--include/linux/libnvdimm.h2
-rw-r--r--include/uapi/linux/ndctl.h1
15 files changed, 273 insertions, 113 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 4f004dc0a0f2..f835946d78ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4643,10 +4643,11 @@ S: Maintained
4643F: drivers/i2c/busses/i2c-diolan-u2c.c 4643F: drivers/i2c/busses/i2c-diolan-u2c.c
4644 4644
4645FILESYSTEM DIRECT ACCESS (DAX) 4645FILESYSTEM DIRECT ACCESS (DAX)
4646M: Matthew Wilcox <willy@infradead.org> 4646M: Dan Williams <dan.j.williams@intel.com>
4647M: Ross Zwisler <zwisler@kernel.org> 4647R: Matthew Wilcox <willy@infradead.org>
4648M: Jan Kara <jack@suse.cz> 4648R: Jan Kara <jack@suse.cz>
4649L: linux-fsdevel@vger.kernel.org 4649L: linux-fsdevel@vger.kernel.org
4650L: linux-nvdimm@lists.01.org
4650S: Supported 4651S: Supported
4651F: fs/dax.c 4652F: fs/dax.c
4652F: include/linux/dax.h 4653F: include/linux/dax.h
@@ -4654,9 +4655,9 @@ F: include/trace/events/fs_dax.h
4654 4655
4655DEVICE DIRECT ACCESS (DAX) 4656DEVICE DIRECT ACCESS (DAX)
4656M: Dan Williams <dan.j.williams@intel.com> 4657M: Dan Williams <dan.j.williams@intel.com>
4657M: Dave Jiang <dave.jiang@intel.com>
4658M: Ross Zwisler <zwisler@kernel.org>
4659M: Vishal Verma <vishal.l.verma@intel.com> 4658M: Vishal Verma <vishal.l.verma@intel.com>
4659M: Keith Busch <keith.busch@intel.com>
4660M: Dave Jiang <dave.jiang@intel.com>
4660L: linux-nvdimm@lists.01.org 4661L: linux-nvdimm@lists.01.org
4661S: Supported 4662S: Supported
4662F: drivers/dax/ 4663F: drivers/dax/
@@ -8812,7 +8813,6 @@ S: Maintained
8812F: tools/lib/lockdep/ 8813F: tools/lib/lockdep/
8813 8814
8814LIBNVDIMM BLK: MMIO-APERTURE DRIVER 8815LIBNVDIMM BLK: MMIO-APERTURE DRIVER
8815M: Ross Zwisler <zwisler@kernel.org>
8816M: Dan Williams <dan.j.williams@intel.com> 8816M: Dan Williams <dan.j.williams@intel.com>
8817M: Vishal Verma <vishal.l.verma@intel.com> 8817M: Vishal Verma <vishal.l.verma@intel.com>
8818M: Dave Jiang <dave.jiang@intel.com> 8818M: Dave Jiang <dave.jiang@intel.com>
@@ -8825,7 +8825,6 @@ F: drivers/nvdimm/region_devs.c
8825LIBNVDIMM BTT: BLOCK TRANSLATION TABLE 8825LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
8826M: Vishal Verma <vishal.l.verma@intel.com> 8826M: Vishal Verma <vishal.l.verma@intel.com>
8827M: Dan Williams <dan.j.williams@intel.com> 8827M: Dan Williams <dan.j.williams@intel.com>
8828M: Ross Zwisler <zwisler@kernel.org>
8829M: Dave Jiang <dave.jiang@intel.com> 8828M: Dave Jiang <dave.jiang@intel.com>
8830L: linux-nvdimm@lists.01.org 8829L: linux-nvdimm@lists.01.org
8831Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8830Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
@@ -8833,7 +8832,6 @@ S: Supported
8833F: drivers/nvdimm/btt* 8832F: drivers/nvdimm/btt*
8834 8833
8835LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER 8834LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
8836M: Ross Zwisler <zwisler@kernel.org>
8837M: Dan Williams <dan.j.williams@intel.com> 8835M: Dan Williams <dan.j.williams@intel.com>
8838M: Vishal Verma <vishal.l.verma@intel.com> 8836M: Vishal Verma <vishal.l.verma@intel.com>
8839M: Dave Jiang <dave.jiang@intel.com> 8837M: Dave Jiang <dave.jiang@intel.com>
@@ -8852,9 +8850,10 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
8852 8850
8853LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM 8851LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
8854M: Dan Williams <dan.j.williams@intel.com> 8852M: Dan Williams <dan.j.williams@intel.com>
8855M: Ross Zwisler <zwisler@kernel.org>
8856M: Vishal Verma <vishal.l.verma@intel.com> 8853M: Vishal Verma <vishal.l.verma@intel.com>
8857M: Dave Jiang <dave.jiang@intel.com> 8854M: Dave Jiang <dave.jiang@intel.com>
8855M: Keith Busch <keith.busch@intel.com>
8856M: Ira Weiny <ira.weiny@intel.com>
8858L: linux-nvdimm@lists.01.org 8857L: linux-nvdimm@lists.01.org
8859Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8858Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8860T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git 8859T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e18ade5d74e9..df8979008dd4 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -55,6 +55,10 @@ static bool no_init_ars;
55module_param(no_init_ars, bool, 0644); 55module_param(no_init_ars, bool, 0644);
56MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time"); 56MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
57 57
58static bool force_labels;
59module_param(force_labels, bool, 0444);
60MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
61
58LIST_HEAD(acpi_descs); 62LIST_HEAD(acpi_descs);
59DEFINE_MUTEX(acpi_desc_lock); 63DEFINE_MUTEX(acpi_desc_lock);
60 64
@@ -415,7 +419,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
415 if (call_pkg) { 419 if (call_pkg) {
416 int i; 420 int i;
417 421
418 if (nfit_mem->family != call_pkg->nd_family) 422 if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
419 return -ENOTTY; 423 return -ENOTTY;
420 424
421 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) 425 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
@@ -424,6 +428,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
424 return call_pkg->nd_command; 428 return call_pkg->nd_command;
425 } 429 }
426 430
431 /* In the !call_pkg case, bus commands == bus functions */
432 if (!nfit_mem)
433 return cmd;
434
427 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ 435 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
428 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 436 if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
429 return cmd; 437 return cmd;
@@ -454,17 +462,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
454 if (cmd_rc) 462 if (cmd_rc)
455 *cmd_rc = -EINVAL; 463 *cmd_rc = -EINVAL;
456 464
465 if (cmd == ND_CMD_CALL)
466 call_pkg = buf;
467 func = cmd_to_func(nfit_mem, cmd, call_pkg);
468 if (func < 0)
469 return func;
470
457 if (nvdimm) { 471 if (nvdimm) {
458 struct acpi_device *adev = nfit_mem->adev; 472 struct acpi_device *adev = nfit_mem->adev;
459 473
460 if (!adev) 474 if (!adev)
461 return -ENOTTY; 475 return -ENOTTY;
462 476
463 if (cmd == ND_CMD_CALL)
464 call_pkg = buf;
465 func = cmd_to_func(nfit_mem, cmd, call_pkg);
466 if (func < 0)
467 return func;
468 dimm_name = nvdimm_name(nvdimm); 477 dimm_name = nvdimm_name(nvdimm);
469 cmd_name = nvdimm_cmd_name(cmd); 478 cmd_name = nvdimm_cmd_name(cmd);
470 cmd_mask = nvdimm_cmd_mask(nvdimm); 479 cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -475,12 +484,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
475 } else { 484 } else {
476 struct acpi_device *adev = to_acpi_dev(acpi_desc); 485 struct acpi_device *adev = to_acpi_dev(acpi_desc);
477 486
478 func = cmd;
479 cmd_name = nvdimm_bus_cmd_name(cmd); 487 cmd_name = nvdimm_bus_cmd_name(cmd);
480 cmd_mask = nd_desc->cmd_mask; 488 cmd_mask = nd_desc->cmd_mask;
481 dsm_mask = cmd_mask; 489 dsm_mask = nd_desc->bus_dsm_mask;
482 if (cmd == ND_CMD_CALL)
483 dsm_mask = nd_desc->bus_dsm_mask;
484 desc = nd_cmd_bus_desc(cmd); 490 desc = nd_cmd_bus_desc(cmd);
485 guid = to_nfit_uuid(NFIT_DEV_BUS); 491 guid = to_nfit_uuid(NFIT_DEV_BUS);
486 handle = adev->handle; 492 handle = adev->handle;
@@ -554,6 +560,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
554 return -EINVAL; 560 return -EINVAL;
555 } 561 }
556 562
563 if (out_obj->type != ACPI_TYPE_BUFFER) {
564 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
565 dimm_name, cmd_name, out_obj->type);
566 rc = -EINVAL;
567 goto out;
568 }
569
557 if (call_pkg) { 570 if (call_pkg) {
558 call_pkg->nd_fw_size = out_obj->buffer.length; 571 call_pkg->nd_fw_size = out_obj->buffer.length;
559 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, 572 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -572,13 +585,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
572 return 0; 585 return 0;
573 } 586 }
574 587
575 if (out_obj->package.type != ACPI_TYPE_BUFFER) {
576 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
577 dimm_name, cmd_name, out_obj->type);
578 rc = -EINVAL;
579 goto out;
580 }
581
582 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, 588 dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
583 cmd_name, out_obj->buffer.length); 589 cmd_name, out_obj->buffer.length);
584 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, 590 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
@@ -1317,19 +1323,30 @@ static ssize_t scrub_show(struct device *dev,
1317 struct device_attribute *attr, char *buf) 1323 struct device_attribute *attr, char *buf)
1318{ 1324{
1319 struct nvdimm_bus_descriptor *nd_desc; 1325 struct nvdimm_bus_descriptor *nd_desc;
1326 struct acpi_nfit_desc *acpi_desc;
1320 ssize_t rc = -ENXIO; 1327 ssize_t rc = -ENXIO;
1328 bool busy;
1321 1329
1322 device_lock(dev); 1330 device_lock(dev);
1323 nd_desc = dev_get_drvdata(dev); 1331 nd_desc = dev_get_drvdata(dev);
1324 if (nd_desc) { 1332 if (!nd_desc) {
1325 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1333 device_unlock(dev);
1334 return rc;
1335 }
1336 acpi_desc = to_acpi_desc(nd_desc);
1326 1337
1327 mutex_lock(&acpi_desc->init_mutex); 1338 mutex_lock(&acpi_desc->init_mutex);
1328 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, 1339 busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
1329 acpi_desc->scrub_busy 1340 && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
1330 && !acpi_desc->cancel ? "+\n" : "\n"); 1341 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
1331 mutex_unlock(&acpi_desc->init_mutex); 1342 /* Allow an admin to poll the busy state at a higher rate */
1343 if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
1344 &acpi_desc->scrub_flags)) {
1345 acpi_desc->scrub_tmo = 1;
1346 mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
1332 } 1347 }
1348
1349 mutex_unlock(&acpi_desc->init_mutex);
1333 device_unlock(dev); 1350 device_unlock(dev);
1334 return rc; 1351 return rc;
1335} 1352}
@@ -1759,14 +1776,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1759 1776
1760__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) 1777__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1761{ 1778{
1779 struct device *dev = &nfit_mem->adev->dev;
1762 struct nd_intel_smart smart = { 0 }; 1780 struct nd_intel_smart smart = { 0 };
1763 union acpi_object in_buf = { 1781 union acpi_object in_buf = {
1764 .type = ACPI_TYPE_BUFFER, 1782 .buffer.type = ACPI_TYPE_BUFFER,
1765 .buffer.pointer = (char *) &smart, 1783 .buffer.length = 0,
1766 .buffer.length = sizeof(smart),
1767 }; 1784 };
1768 union acpi_object in_obj = { 1785 union acpi_object in_obj = {
1769 .type = ACPI_TYPE_PACKAGE, 1786 .package.type = ACPI_TYPE_PACKAGE,
1770 .package.count = 1, 1787 .package.count = 1,
1771 .package.elements = &in_buf, 1788 .package.elements = &in_buf,
1772 }; 1789 };
@@ -1781,8 +1798,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1781 return; 1798 return;
1782 1799
1783 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); 1800 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
1784 if (!out_obj) 1801 if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
1802 || out_obj->buffer.length < sizeof(smart)) {
1803 dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
1804 dev_name(dev));
1805 ACPI_FREE(out_obj);
1785 return; 1806 return;
1807 }
1808 memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
1809 ACPI_FREE(out_obj);
1786 1810
1787 if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { 1811 if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
1788 if (smart.shutdown_state) 1812 if (smart.shutdown_state)
@@ -1793,7 +1817,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1793 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags); 1817 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
1794 nfit_mem->dirty_shutdown = smart.shutdown_count; 1818 nfit_mem->dirty_shutdown = smart.shutdown_count;
1795 } 1819 }
1796 ACPI_FREE(out_obj);
1797} 1820}
1798 1821
1799static void populate_shutdown_status(struct nfit_mem *nfit_mem) 1822static void populate_shutdown_status(struct nfit_mem *nfit_mem)
@@ -1861,9 +1884,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1861 dev_set_drvdata(&adev_dimm->dev, nfit_mem); 1884 dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1862 1885
1863 /* 1886 /*
1864 * Until standardization materializes we need to consider 4 1887 * There are 4 "legacy" NVDIMM command sets
1865 * different command sets. Note, that checking for function0 (bit0) 1888 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
1866 * tells us if any commands are reachable through this GUID. 1889 * an EFI working group was established to constrain this
1890 * proliferation. The nfit driver probes for the supported command
1891 * set by GUID. Note, if you're a platform developer looking to add
1892 * a new command set to this probe, consider using an existing set,
1893 * or otherwise seek approval to publish the command set at
1894 * http://www.uefi.org/RFIC_LIST.
1895 *
1896 * Note, that checking for function0 (bit0) tells us if any commands
1897 * are reachable through this GUID.
1867 */ 1898 */
1868 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) 1899 for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1869 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1900 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1886,6 +1917,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1886 dsm_mask &= ~(1 << 8); 1917 dsm_mask &= ~(1 << 8);
1887 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { 1918 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1888 dsm_mask = 0xffffffff; 1919 dsm_mask = 0xffffffff;
1920 } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
1921 dsm_mask = 0x1f;
1889 } else { 1922 } else {
1890 dev_dbg(dev, "unknown dimm command family\n"); 1923 dev_dbg(dev, "unknown dimm command family\n");
1891 nfit_mem->family = -1; 1924 nfit_mem->family = -1;
@@ -1915,18 +1948,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1915 | 1 << ND_CMD_SET_CONFIG_DATA; 1948 | 1 << ND_CMD_SET_CONFIG_DATA;
1916 if (family == NVDIMM_FAMILY_INTEL 1949 if (family == NVDIMM_FAMILY_INTEL
1917 && (dsm_mask & label_mask) == label_mask) 1950 && (dsm_mask & label_mask) == label_mask)
1918 return 0; 1951 /* skip _LS{I,R,W} enabling */;
1952 else {
1953 if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1954 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1955 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1956 set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1957 }
1919 1958
1920 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1959 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
1921 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1960 && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1922 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1961 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1923 set_bit(NFIT_MEM_LSR, &nfit_mem->flags); 1962 set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
1924 } 1963 }
1925 1964
1926 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) 1965 /*
1927 && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { 1966 * Quirk read-only label configurations to preserve
1928 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); 1967 * access to label-less namespaces by default.
1929 set_bit(NFIT_MEM_LSW, &nfit_mem->flags); 1968 */
1969 if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
1970 && !force_labels) {
1971 dev_dbg(dev, "%s: No _LSW, disable labels\n",
1972 dev_name(&adev_dimm->dev));
1973 clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1974 } else
1975 dev_dbg(dev, "%s: Force enable labels\n",
1976 dev_name(&adev_dimm->dev));
1930 } 1977 }
1931 1978
1932 populate_shutdown_status(nfit_mem); 1979 populate_shutdown_status(nfit_mem);
@@ -2027,6 +2074,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
2027 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 2074 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
2028 } 2075 }
2029 2076
2077 /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
2078 if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
2079 set_bit(NDD_NOBLK, &flags);
2080
2030 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { 2081 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
2031 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 2082 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
2032 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); 2083 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
@@ -2050,7 +2101,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
2050 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 2101 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
2051 continue; 2102 continue;
2052 2103
2053 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", 2104 dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
2054 nvdimm_name(nvdimm), 2105 nvdimm_name(nvdimm),
2055 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 2106 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
2056 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 2107 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
@@ -2641,7 +2692,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
2641 2692
2642 if (rc < 0) 2693 if (rc < 0)
2643 return rc; 2694 return rc;
2644 return cmd_rc; 2695 if (cmd_rc < 0)
2696 return cmd_rc;
2697 set_bit(ARS_VALID, &acpi_desc->scrub_flags);
2698 return 0;
2645} 2699}
2646 2700
2647static int ars_continue(struct acpi_nfit_desc *acpi_desc) 2701static int ars_continue(struct acpi_nfit_desc *acpi_desc)
@@ -2651,11 +2705,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2651 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 2705 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2652 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; 2706 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2653 2707
2654 memset(&ars_start, 0, sizeof(ars_start)); 2708 ars_start = (struct nd_cmd_ars_start) {
2655 ars_start.address = ars_status->restart_address; 2709 .address = ars_status->restart_address,
2656 ars_start.length = ars_status->restart_length; 2710 .length = ars_status->restart_length,
2657 ars_start.type = ars_status->type; 2711 .type = ars_status->type,
2658 ars_start.flags = acpi_desc->ars_start_flags; 2712 };
2659 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, 2713 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2660 sizeof(ars_start), &cmd_rc); 2714 sizeof(ars_start), &cmd_rc);
2661 if (rc < 0) 2715 if (rc < 0)
@@ -2734,6 +2788,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2734 */ 2788 */
2735 if (ars_status->out_length < 44) 2789 if (ars_status->out_length < 44)
2736 return 0; 2790 return 0;
2791
2792 /*
2793 * Ignore potentially stale results that are only refreshed
2794 * after a start-ARS event.
2795 */
2796 if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
2797 dev_dbg(acpi_desc->dev, "skip %d stale records\n",
2798 ars_status->num_records);
2799 return 0;
2800 }
2801
2737 for (i = 0; i < ars_status->num_records; i++) { 2802 for (i = 0; i < ars_status->num_records; i++) {
2738 /* only process full records */ 2803 /* only process full records */
2739 if (ars_status->out_length 2804 if (ars_status->out_length
@@ -3004,14 +3069,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
3004{ 3069{
3005 int rc; 3070 int rc;
3006 3071
3007 if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3072 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3008 return acpi_nfit_register_region(acpi_desc, nfit_spa); 3073 return acpi_nfit_register_region(acpi_desc, nfit_spa);
3009 3074
3010 set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); 3075 set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
3011 set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); 3076 if (!no_init_ars)
3077 set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
3012 3078
3013 switch (acpi_nfit_query_poison(acpi_desc)) { 3079 switch (acpi_nfit_query_poison(acpi_desc)) {
3014 case 0: 3080 case 0:
3081 case -ENOSPC:
3015 case -EAGAIN: 3082 case -EAGAIN:
3016 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); 3083 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
3017 /* shouldn't happen, try again later */ 3084 /* shouldn't happen, try again later */
@@ -3036,7 +3103,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
3036 break; 3103 break;
3037 case -EBUSY: 3104 case -EBUSY:
3038 case -ENOMEM: 3105 case -ENOMEM:
3039 case -ENOSPC:
3040 /* 3106 /*
3041 * BIOS was using ARS, wait for it to complete (or 3107 * BIOS was using ARS, wait for it to complete (or
3042 * resources to become available) and then perform our 3108 * resources to become available) and then perform our
@@ -3071,7 +3137,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
3071 3137
3072 lockdep_assert_held(&acpi_desc->init_mutex); 3138 lockdep_assert_held(&acpi_desc->init_mutex);
3073 3139
3074 if (acpi_desc->cancel) 3140 if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
3075 return 0; 3141 return 0;
3076 3142
3077 if (query_rc == -EBUSY) { 3143 if (query_rc == -EBUSY) {
@@ -3145,7 +3211,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
3145{ 3211{
3146 lockdep_assert_held(&acpi_desc->init_mutex); 3212 lockdep_assert_held(&acpi_desc->init_mutex);
3147 3213
3148 acpi_desc->scrub_busy = 1; 3214 set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
3149 /* note this should only be set from within the workqueue */ 3215 /* note this should only be set from within the workqueue */
3150 if (tmo) 3216 if (tmo)
3151 acpi_desc->scrub_tmo = tmo; 3217 acpi_desc->scrub_tmo = tmo;
@@ -3161,7 +3227,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
3161{ 3227{
3162 lockdep_assert_held(&acpi_desc->init_mutex); 3228 lockdep_assert_held(&acpi_desc->init_mutex);
3163 3229
3164 acpi_desc->scrub_busy = 0; 3230 clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
3165 acpi_desc->scrub_count++; 3231 acpi_desc->scrub_count++;
3166 if (acpi_desc->scrub_count_state) 3232 if (acpi_desc->scrub_count_state)
3167 sysfs_notify_dirent(acpi_desc->scrub_count_state); 3233 sysfs_notify_dirent(acpi_desc->scrub_count_state);
@@ -3182,6 +3248,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
3182 else 3248 else
3183 notify_ars_done(acpi_desc); 3249 notify_ars_done(acpi_desc);
3184 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); 3250 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3251 clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
3185 mutex_unlock(&acpi_desc->init_mutex); 3252 mutex_unlock(&acpi_desc->init_mutex);
3186} 3253}
3187 3254
@@ -3216,6 +3283,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3216 struct nfit_spa *nfit_spa; 3283 struct nfit_spa *nfit_spa;
3217 int rc; 3284 int rc;
3218 3285
3286 set_bit(ARS_VALID, &acpi_desc->scrub_flags);
3219 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 3287 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3220 switch (nfit_spa_type(nfit_spa->spa)) { 3288 switch (nfit_spa_type(nfit_spa->spa)) {
3221 case NFIT_SPA_VOLATILE: 3289 case NFIT_SPA_VOLATILE:
@@ -3450,7 +3518,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3450 struct nfit_spa *nfit_spa; 3518 struct nfit_spa *nfit_spa;
3451 3519
3452 mutex_lock(&acpi_desc->init_mutex); 3520 mutex_lock(&acpi_desc->init_mutex);
3453 if (acpi_desc->cancel) { 3521 if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
3454 mutex_unlock(&acpi_desc->init_mutex); 3522 mutex_unlock(&acpi_desc->init_mutex);
3455 return 0; 3523 return 0;
3456 } 3524 }
@@ -3529,7 +3597,7 @@ void acpi_nfit_shutdown(void *data)
3529 mutex_unlock(&acpi_desc_lock); 3597 mutex_unlock(&acpi_desc_lock);
3530 3598
3531 mutex_lock(&acpi_desc->init_mutex); 3599 mutex_lock(&acpi_desc->init_mutex);
3532 acpi_desc->cancel = 1; 3600 set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
3533 cancel_delayed_work_sync(&acpi_desc->dwork); 3601 cancel_delayed_work_sync(&acpi_desc->dwork);
3534 mutex_unlock(&acpi_desc->init_mutex); 3602 mutex_unlock(&acpi_desc->init_mutex);
3535 3603
@@ -3729,6 +3797,7 @@ static __init int nfit_init(void)
3729 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 3797 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3730 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 3798 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3731 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); 3799 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3800 guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
3732 3801
3733 nfit_wq = create_singlethread_workqueue("nfit"); 3802 nfit_wq = create_singlethread_workqueue("nfit");
3734 if (!nfit_wq) 3803 if (!nfit_wq)
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 33691aecfcee..2f8cf2a11e3b 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,11 +34,14 @@
34/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ 34/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
35#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" 35#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
36 36
37/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */
38#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
39
37#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ 40#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
38 | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ 41 | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
39 | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) 42 | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
40 43
41#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT 44#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
42 45
43#define NVDIMM_STANDARD_CMDMASK \ 46#define NVDIMM_STANDARD_CMDMASK \
44(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ 47(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -94,6 +97,7 @@ enum nfit_uuids {
94 NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, 97 NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
95 NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, 98 NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
96 NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, 99 NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
100 NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
97 NFIT_SPA_VOLATILE, 101 NFIT_SPA_VOLATILE,
98 NFIT_SPA_PM, 102 NFIT_SPA_PM,
99 NFIT_SPA_DCR, 103 NFIT_SPA_DCR,
@@ -210,6 +214,13 @@ struct nfit_mem {
210 int family; 214 int family;
211}; 215};
212 216
217enum scrub_flags {
218 ARS_BUSY,
219 ARS_CANCEL,
220 ARS_VALID,
221 ARS_POLL,
222};
223
213struct acpi_nfit_desc { 224struct acpi_nfit_desc {
214 struct nvdimm_bus_descriptor nd_desc; 225 struct nvdimm_bus_descriptor nd_desc;
215 struct acpi_table_header acpi_header; 226 struct acpi_table_header acpi_header;
@@ -223,7 +234,6 @@ struct acpi_nfit_desc {
223 struct list_head idts; 234 struct list_head idts;
224 struct nvdimm_bus *nvdimm_bus; 235 struct nvdimm_bus *nvdimm_bus;
225 struct device *dev; 236 struct device *dev;
226 u8 ars_start_flags;
227 struct nd_cmd_ars_status *ars_status; 237 struct nd_cmd_ars_status *ars_status;
228 struct nfit_spa *scrub_spa; 238 struct nfit_spa *scrub_spa;
229 struct delayed_work dwork; 239 struct delayed_work dwork;
@@ -232,8 +242,7 @@ struct acpi_nfit_desc {
232 unsigned int max_ars; 242 unsigned int max_ars;
233 unsigned int scrub_count; 243 unsigned int scrub_count;
234 unsigned int scrub_mode; 244 unsigned int scrub_mode;
235 unsigned int scrub_busy:1; 245 unsigned long scrub_flags;
236 unsigned int cancel:1;
237 unsigned long dimm_cmd_force_en; 246 unsigned long dimm_cmd_force_en;
238 unsigned long bus_cmd_force_en; 247 unsigned long bus_cmd_force_en;
239 unsigned long bus_nfit_cmd_force_en; 248 unsigned long bus_nfit_cmd_force_en;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 6e928f37d084..0cb8c30ea278 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
86{ 86{
87 struct dax_device *dax_dev; 87 struct dax_device *dax_dev;
88 bool dax_enabled = false; 88 bool dax_enabled = false;
89 pgoff_t pgoff, pgoff_end;
89 struct request_queue *q; 90 struct request_queue *q;
90 pgoff_t pgoff;
91 int err, id;
92 pfn_t pfn;
93 long len;
94 char buf[BDEVNAME_SIZE]; 91 char buf[BDEVNAME_SIZE];
92 void *kaddr, *end_kaddr;
93 pfn_t pfn, end_pfn;
94 sector_t last_page;
95 long len, len2;
96 int err, id;
95 97
96 if (blocksize != PAGE_SIZE) { 98 if (blocksize != PAGE_SIZE) {
97 pr_debug("%s: error: unsupported blocksize for dax\n", 99 pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
113 return false; 115 return false;
114 } 116 }
115 117
118 last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
119 err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
120 if (err) {
121 pr_debug("%s: error: unaligned partition for dax\n",
122 bdevname(bdev, buf));
123 return false;
124 }
125
116 dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 126 dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
117 if (!dax_dev) { 127 if (!dax_dev) {
118 pr_debug("%s: error: device does not support dax\n", 128 pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
121 } 131 }
122 132
123 id = dax_read_lock(); 133 id = dax_read_lock();
124 len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn); 134 len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
135 len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
125 dax_read_unlock(id); 136 dax_read_unlock(id);
126 137
127 put_dax(dax_dev); 138 put_dax(dax_dev);
128 139
129 if (len < 1) { 140 if (len < 1 || len2 < 1) {
130 pr_debug("%s: error: dax access failed (%ld)\n", 141 pr_debug("%s: error: dax access failed (%ld)\n",
131 bdevname(bdev, buf), len); 142 bdevname(bdev, buf), len < 1 ? len : len2);
132 return false; 143 return false;
133 } 144 }
134 145
@@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
143 */ 154 */
144 WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)); 155 WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
145 dax_enabled = true; 156 dax_enabled = true;
146 } else if (pfn_t_devmap(pfn)) { 157 } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
147 struct dev_pagemap *pgmap; 158 struct dev_pagemap *pgmap, *end_pgmap;
148 159
149 pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL); 160 pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
150 if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX) 161 end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
162 if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
163 && pfn_t_to_page(pfn)->pgmap == pgmap
164 && pfn_t_to_page(end_pfn)->pgmap == pgmap
165 && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
166 && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
151 dax_enabled = true; 167 dax_enabled = true;
152 put_dev_pagemap(pgmap); 168 put_dev_pagemap(pgmap);
169 put_dev_pagemap(end_pgmap);
170
153 } 171 }
154 172
155 if (!dax_enabled) { 173 if (!dax_enabled) {
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index b123b0dcf274..4671776f5623 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
541 541
542static int btt_freelist_init(struct arena_info *arena) 542static int btt_freelist_init(struct arena_info *arena)
543{ 543{
544 int old, new, ret; 544 int new, ret;
545 u32 i, map_entry; 545 struct log_entry log_new;
546 struct log_entry log_new, log_old; 546 u32 i, map_entry, log_oldmap, log_newmap;
547 547
548 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), 548 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
549 GFP_KERNEL); 549 GFP_KERNEL);
@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena)
551 return -ENOMEM; 551 return -ENOMEM;
552 552
553 for (i = 0; i < arena->nfree; i++) { 553 for (i = 0; i < arena->nfree; i++) {
554 old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
555 if (old < 0)
556 return old;
557
558 new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); 554 new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
559 if (new < 0) 555 if (new < 0)
560 return new; 556 return new;
561 557
558 /* old and new map entries with any flags stripped out */
559 log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
560 log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
561
562 /* sub points to the next one to be overwritten */ 562 /* sub points to the next one to be overwritten */
563 arena->freelist[i].sub = 1 - new; 563 arena->freelist[i].sub = 1 - new;
564 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); 564 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
565 arena->freelist[i].block = le32_to_cpu(log_new.old_map); 565 arena->freelist[i].block = log_oldmap;
566 566
567 /* 567 /*
568 * FIXME: if error clearing fails during init, we want to make 568 * FIXME: if error clearing fails during init, we want to make
569 * the BTT read-only 569 * the BTT read-only
570 */ 570 */
571 if (ent_e_flag(log_new.old_map)) { 571 if (ent_e_flag(log_new.old_map) &&
572 !ent_normal(log_new.old_map)) {
573 arena->freelist[i].has_err = 1;
572 ret = arena_clear_freelist_error(arena, i); 574 ret = arena_clear_freelist_error(arena, i);
573 if (ret) 575 if (ret)
574 dev_err_ratelimited(to_dev(arena), 576 dev_err_ratelimited(to_dev(arena),
@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena)
576 } 578 }
577 579
578 /* This implies a newly created or untouched flog entry */ 580 /* This implies a newly created or untouched flog entry */
579 if (log_new.old_map == log_new.new_map) 581 if (log_oldmap == log_newmap)
580 continue; 582 continue;
581 583
582 /* Check if map recovery is needed */ 584 /* Check if map recovery is needed */
@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena)
584 NULL, NULL, 0); 586 NULL, NULL, 0);
585 if (ret) 587 if (ret)
586 return ret; 588 return ret;
587 if ((le32_to_cpu(log_new.new_map) != map_entry) && 589
588 (le32_to_cpu(log_new.old_map) == map_entry)) { 590 /*
591 * The map_entry from btt_read_map is stripped of any flag bits,
592 * so use the stripped out versions from the log as well for
593 * testing whether recovery is needed. For restoration, use the
594 * 'raw' version of the log entries as that captured what we
595 * were going to write originally.
596 */
597 if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
589 /* 598 /*
590 * Last transaction wrote the flog, but wasn't able 599 * Last transaction wrote the flog, but wasn't able
591 * to complete the map write. So fix up the map. 600 * to complete the map write. So fix up the map.
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index db3cb6d4d0d4..ddff49c707b0 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -44,6 +44,8 @@
44#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) 44#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
45#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) 45#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
46#define set_e_flag(ent) (ent |= MAP_ERR_MASK) 46#define set_e_flag(ent) (ent |= MAP_ERR_MASK)
47/* 'normal' is both e and z flags set */
48#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
47 49
48enum btt_init_state { 50enum btt_init_state {
49 INIT_UNCHECKED = 0, 51 INIT_UNCHECKED = 0,
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 795ad4ff35ca..b72a303176c7 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev,
159} 159}
160static DEVICE_ATTR_RO(size); 160static DEVICE_ATTR_RO(size);
161 161
/*
 * sysfs attribute that unconditionally reports "Y".
 *
 * NOTE(review): presumably advertises to userspace (ndctl) that this
 * BTT implementation accepts log entries whose flag bits are zeroed --
 * confirm against the BTT log-entry flag handling in btt.c / btt.h.
 */
static ssize_t log_zero_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* Capability is static for this driver build, hence always "Y". */
	return sprintf(buf, "Y\n");
}
static DEVICE_ATTR_RO(log_zero_flags);
168
162static struct attribute *nd_btt_attributes[] = { 169static struct attribute *nd_btt_attributes[] = {
163 &dev_attr_sector_size.attr, 170 &dev_attr_sector_size.attr,
164 &dev_attr_namespace.attr, 171 &dev_attr_namespace.attr,
165 &dev_attr_uuid.attr, 172 &dev_attr_uuid.attr,
166 &dev_attr_size.attr, 173 &dev_attr_size.attr,
174 &dev_attr_log_zero_flags.attr,
167 NULL, 175 NULL,
168}; 176};
169 177
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index efe412a6b5b9..91b9abbf689c 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -11,6 +11,7 @@
11 * General Public License for more details. 11 * General Public License for more details.
12 */ 12 */
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/moduleparam.h>
14#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
15#include <linux/device.h> 16#include <linux/device.h>
16#include <linux/ndctl.h> 17#include <linux/ndctl.h>
@@ -25,6 +26,10 @@
25 26
26static DEFINE_IDA(dimm_ida); 27static DEFINE_IDA(dimm_ida);
27 28
29static bool noblk;
30module_param(noblk, bool, 0444);
31MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
32
28/* 33/*
29 * Retrieve bus and dimm handle and return if this bus supports 34 * Retrieve bus and dimm handle and return if this bus supports
30 * get_config_data commands 35 * get_config_data commands
@@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
551 556
552 nvdimm->dimm_id = dimm_id; 557 nvdimm->dimm_id = dimm_id;
553 nvdimm->provider_data = provider_data; 558 nvdimm->provider_data = provider_data;
559 if (noblk)
560 flags |= 1 << NDD_NOBLK;
554 nvdimm->flags = flags; 561 nvdimm->flags = flags;
555 nvdimm->cmd_mask = cmd_mask; 562 nvdimm->cmd_mask = cmd_mask;
556 nvdimm->num_flush = num_flush; 563 nvdimm->num_flush = num_flush;
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index a11bf4e6b451..f3d753d3169c 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
392 return 0; /* no label, nothing to reserve */ 392 return 0; /* no label, nothing to reserve */
393 393
394 for_each_clear_bit_le(slot, free, nslot) { 394 for_each_clear_bit_le(slot, free, nslot) {
395 struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
395 struct nd_namespace_label *nd_label; 396 struct nd_namespace_label *nd_label;
396 struct nd_region *nd_region = NULL; 397 struct nd_region *nd_region = NULL;
397 u8 label_uuid[NSLABEL_UUID_LEN]; 398 u8 label_uuid[NSLABEL_UUID_LEN];
@@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
406 407
407 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); 408 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
408 flags = __le32_to_cpu(nd_label->flags); 409 flags = __le32_to_cpu(nd_label->flags);
410 if (test_bit(NDD_NOBLK, &nvdimm->flags))
411 flags &= ~NSLABEL_FLAG_LOCAL;
409 nd_label_gen_id(&label_id, label_uuid, flags); 412 nd_label_gen_id(&label_id, label_uuid, flags);
410 res = nvdimm_allocate_dpa(ndd, &label_id, 413 res = nvdimm_allocate_dpa(ndd, &label_id,
411 __le64_to_cpu(nd_label->dpa), 414 __le64_to_cpu(nd_label->dpa),
@@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
755 758
756static int __pmem_label_update(struct nd_region *nd_region, 759static int __pmem_label_update(struct nd_region *nd_region,
757 struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, 760 struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
758 int pos) 761 int pos, unsigned long flags)
759{ 762{
760 struct nd_namespace_common *ndns = &nspm->nsio.common; 763 struct nd_namespace_common *ndns = &nspm->nsio.common;
761 struct nd_interleave_set *nd_set = nd_region->nd_set; 764 struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
796 memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); 799 memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
797 if (nspm->alt_name) 800 if (nspm->alt_name)
798 memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); 801 memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
799 nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING); 802 nd_label->flags = __cpu_to_le32(flags);
800 nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); 803 nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
801 nd_label->position = __cpu_to_le16(pos); 804 nd_label->position = __cpu_to_le16(pos);
802 nd_label->isetcookie = __cpu_to_le64(cookie); 805 nd_label->isetcookie = __cpu_to_le64(cookie);
@@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1249int nd_pmem_namespace_label_update(struct nd_region *nd_region, 1252int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1250 struct nd_namespace_pmem *nspm, resource_size_t size) 1253 struct nd_namespace_pmem *nspm, resource_size_t size)
1251{ 1254{
1252 int i; 1255 int i, rc;
1253 1256
1254 for (i = 0; i < nd_region->ndr_mappings; i++) { 1257 for (i = 0; i < nd_region->ndr_mappings; i++) {
1255 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 1258 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1256 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1259 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1257 struct resource *res; 1260 struct resource *res;
1258 int rc, count = 0; 1261 int count = 0;
1259 1262
1260 if (size == 0) { 1263 if (size == 0) {
1261 rc = del_labels(nd_mapping, nspm->uuid); 1264 rc = del_labels(nd_mapping, nspm->uuid);
@@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1273 if (rc < 0) 1276 if (rc < 0)
1274 return rc; 1277 return rc;
1275 1278
1276 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i); 1279 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
1280 NSLABEL_FLAG_UPDATING);
1281 if (rc)
1282 return rc;
1283 }
1284
1285 if (size == 0)
1286 return 0;
1287
1288 /* Clear the UPDATING flag per UEFI 2.7 expectations */
1289 for (i = 0; i < nd_region->ndr_mappings; i++) {
1290 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1291
1292 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
1277 if (rc) 1293 if (rc)
1278 return rc; 1294 return rc;
1279 } 1295 }
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 4b077555ac70..7849bf1812c4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
138bool pmem_should_map_pages(struct device *dev) 138bool pmem_should_map_pages(struct device *dev)
139{ 139{
140 struct nd_region *nd_region = to_nd_region(dev->parent); 140 struct nd_region *nd_region = to_nd_region(dev->parent);
141 struct nd_namespace_common *ndns = to_ndns(dev);
141 struct nd_namespace_io *nsio; 142 struct nd_namespace_io *nsio;
142 143
143 if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) 144 if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
149 if (is_nd_pfn(dev) || is_nd_btt(dev)) 150 if (is_nd_pfn(dev) || is_nd_btt(dev))
150 return false; 151 return false;
151 152
153 if (ndns->force_raw)
154 return false;
155
152 nsio = to_nd_namespace_io(dev); 156 nsio = to_nd_namespace_io(dev);
153 if (region_intersects(nsio->res.start, resource_size(&nsio->res), 157 if (region_intersects(nsio->res.start, resource_size(&nsio->res),
154 IORESOURCE_SYSTEM_RAM, 158 IORESOURCE_SYSTEM_RAM,
@@ -1506,13 +1510,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
1506 if (dev->driver || ndns->claim) 1510 if (dev->driver || ndns->claim)
1507 return -EBUSY; 1511 return -EBUSY;
1508 1512
1509 if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0) 1513 if (sysfs_streq(buf, "btt"))
1510 ndns->claim_class = btt_claim_class(dev); 1514 ndns->claim_class = btt_claim_class(dev);
1511 else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0) 1515 else if (sysfs_streq(buf, "pfn"))
1512 ndns->claim_class = NVDIMM_CCLASS_PFN; 1516 ndns->claim_class = NVDIMM_CCLASS_PFN;
1513 else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0) 1517 else if (sysfs_streq(buf, "dax"))
1514 ndns->claim_class = NVDIMM_CCLASS_DAX; 1518 ndns->claim_class = NVDIMM_CCLASS_DAX;
1515 else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0) 1519 else if (sysfs_streq(buf, ""))
1516 ndns->claim_class = NVDIMM_CCLASS_NONE; 1520 ndns->claim_class = NVDIMM_CCLASS_NONE;
1517 else 1521 else
1518 return -EINVAL; 1522 return -EINVAL;
@@ -2492,6 +2496,12 @@ static int init_active_labels(struct nd_region *nd_region)
2492 if (!label_ent) 2496 if (!label_ent)
2493 break; 2497 break;
2494 label = nd_label_active(ndd, j); 2498 label = nd_label_active(ndd, j);
2499 if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
2500 u32 flags = __le32_to_cpu(label->flags);
2501
2502 flags &= ~NSLABEL_FLAG_LOCAL;
2503 label->flags = __cpu_to_le32(flags);
2504 }
2495 label_ent->label = label; 2505 label_ent->label = label;
2496 2506
2497 mutex_lock(&nd_mapping->lock); 2507 mutex_lock(&nd_mapping->lock);
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 0a701837dfc0..11b9821eba85 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -108,7 +108,6 @@ static struct platform_driver of_pmem_region_driver = {
108 .remove = of_pmem_region_remove, 108 .remove = of_pmem_region_remove,
109 .driver = { 109 .driver = {
110 .name = "of_pmem", 110 .name = "of_pmem",
111 .owner = THIS_MODULE,
112 .of_match_table = of_pmem_region_match, 111 .of_match_table = of_pmem_region_match,
113 }, 112 },
114}; 113};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6f22272e8d80..d271bd731af7 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
580} 580}
581EXPORT_SYMBOL(nd_pfn_probe); 581EXPORT_SYMBOL(nd_pfn_probe);
582 582
/*
 * Size of the reservation for the namespace info block: 8K of info/super
 * block data rounded up to a whole page, so the data area that follows
 * stays page aligned.  Used by init_altmap_reserve(), __nvdimm_setup_pfn()
 * and nd_pfn_init() in place of the previous raw SZ_8K constant.
 */
static u32 info_block_reserve(void)
{
	return ALIGN(SZ_8K, PAGE_SIZE);
}
587
583/* 588/*
584 * We hotplug memory at section granularity, pad the reserved area from 589 * We hotplug memory at section granularity, pad the reserved area from
585 * the previous section base to the namespace base address. 590 * the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
593 598
594static unsigned long init_altmap_reserve(resource_size_t base) 599static unsigned long init_altmap_reserve(resource_size_t base)
595{ 600{
596 unsigned long reserve = PHYS_PFN(SZ_8K); 601 unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
597 unsigned long base_pfn = PHYS_PFN(base); 602 unsigned long base_pfn = PHYS_PFN(base);
598 603
599 reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn); 604 reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
608 u64 offset = le64_to_cpu(pfn_sb->dataoff); 613 u64 offset = le64_to_cpu(pfn_sb->dataoff);
609 u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); 614 u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
610 u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); 615 u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
616 u32 reserve = info_block_reserve();
611 struct nd_namespace_common *ndns = nd_pfn->ndns; 617 struct nd_namespace_common *ndns = nd_pfn->ndns;
612 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 618 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
613 resource_size_t base = nsio->res.start + start_pad; 619 resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
621 res->end -= end_trunc; 627 res->end -= end_trunc;
622 628
623 if (nd_pfn->mode == PFN_MODE_RAM) { 629 if (nd_pfn->mode == PFN_MODE_RAM) {
624 if (offset < SZ_8K) 630 if (offset < reserve)
625 return -EINVAL; 631 return -EINVAL;
626 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); 632 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
627 pgmap->altmap_valid = false; 633 pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
634 le64_to_cpu(nd_pfn->pfn_sb->npfns), 640 le64_to_cpu(nd_pfn->pfn_sb->npfns),
635 nd_pfn->npfns); 641 nd_pfn->npfns);
636 memcpy(altmap, &__altmap, sizeof(*altmap)); 642 memcpy(altmap, &__altmap, sizeof(*altmap));
637 altmap->free = PHYS_PFN(offset - SZ_8K); 643 altmap->free = PHYS_PFN(offset - reserve);
638 altmap->alloc = 0; 644 altmap->alloc = 0;
639 pgmap->altmap_valid = true; 645 pgmap->altmap_valid = true;
640 } else 646 } else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
678 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, 684 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
679 IORES_DESC_NONE) == REGION_MIXED 685 IORES_DESC_NONE) == REGION_MIXED
680 || !IS_ALIGNED(end, nd_pfn->align) 686 || !IS_ALIGNED(end, nd_pfn->align)
681 || nd_region_conflict(nd_region, start, size + adjust)) 687 || nd_region_conflict(nd_region, start, size))
682 *end_trunc = end - phys_pmem_align_down(nd_pfn, end); 688 *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
683} 689}
684 690
685static int nd_pfn_init(struct nd_pfn *nd_pfn) 691static int nd_pfn_init(struct nd_pfn *nd_pfn)
686{ 692{
687 u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
688 struct nd_namespace_common *ndns = nd_pfn->ndns; 693 struct nd_namespace_common *ndns = nd_pfn->ndns;
689 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 694 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
695 u32 start_pad, end_trunc, reserve = info_block_reserve();
690 resource_size_t start, size; 696 resource_size_t start, size;
691 struct nd_region *nd_region; 697 struct nd_region *nd_region;
692 u32 start_pad, end_trunc;
693 struct nd_pfn_sb *pfn_sb; 698 struct nd_pfn_sb *pfn_sb;
694 unsigned long npfns; 699 unsigned long npfns;
695 phys_addr_t offset; 700 phys_addr_t offset;
@@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
734 */ 739 */
735 start = nsio->res.start + start_pad; 740 start = nsio->res.start + start_pad;
736 size = resource_size(&nsio->res); 741 size = resource_size(&nsio->res);
737 npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) 742 npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
738 / PAGE_SIZE); 743 / PAGE_SIZE);
739 if (nd_pfn->mode == PFN_MODE_PMEM) { 744 if (nd_pfn->mode == PFN_MODE_PMEM) {
740 /* 745 /*
@@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
742 * when populating the vmemmap. This *should* be equal to 747 * when populating the vmemmap. This *should* be equal to
743 * PMD_SIZE for most architectures. 748 * PMD_SIZE for most architectures.
744 */ 749 */
745 offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, 750 offset = ALIGN(start + reserve + 64 * npfns,
746 max(nd_pfn->align, PMD_SIZE)) - start; 751 max(nd_pfn->align, PMD_SIZE)) - start;
747 } else if (nd_pfn->mode == PFN_MODE_RAM) 752 } else if (nd_pfn->mode == PFN_MODE_RAM)
748 offset = ALIGN(start + SZ_8K + dax_label_reserve, 753 offset = ALIGN(start + reserve, nd_pfn->align) - start;
749 nd_pfn->align) - start;
750 else 754 else
751 return -ENXIO; 755 return -ENXIO;
752 756
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e2818f94f292..3b58baa44b5c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
1003 1003
1004 if (test_bit(NDD_UNARMED, &nvdimm->flags)) 1004 if (test_bit(NDD_UNARMED, &nvdimm->flags))
1005 ro = 1; 1005 ro = 1;
1006
1007 if (test_bit(NDD_NOBLK, &nvdimm->flags)
1008 && dev_type == &nd_blk_device_type) {
1009 dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
1010 caller, dev_name(&nvdimm->dev), i);
1011 return NULL;
1012 }
1006 } 1013 }
1007 1014
1008 if (dev_type == &nd_blk_device_type) { 1015 if (dev_type == &nd_blk_device_type) {
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index ad609617aeb8..43348303cb4b 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -42,6 +42,8 @@ enum {
42 NDD_SECURITY_OVERWRITE = 3, 42 NDD_SECURITY_OVERWRITE = 3,
43 /* tracking whether or not there is a pending device reference */ 43 /* tracking whether or not there is a pending device reference */
44 NDD_WORK_PENDING = 4, 44 NDD_WORK_PENDING = 4,
45 /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
46 NDD_NOBLK = 5,
45 47
46 /* need to set a limit somewhere, but yes, this is likely overkill */ 48 /* need to set a limit somewhere, but yes, this is likely overkill */
47 ND_IOCTL_MAX_BUFLEN = SZ_4M, 49 ND_IOCTL_MAX_BUFLEN = SZ_4M,
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index f57c9e434d2d..de5d90212409 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -243,6 +243,7 @@ struct nd_cmd_pkg {
243#define NVDIMM_FAMILY_HPE1 1 243#define NVDIMM_FAMILY_HPE1 1
244#define NVDIMM_FAMILY_HPE2 2 244#define NVDIMM_FAMILY_HPE2 2
245#define NVDIMM_FAMILY_MSFT 3 245#define NVDIMM_FAMILY_MSFT 3
246#define NVDIMM_FAMILY_HYPERV 4
246 247
247#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ 248#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
248 struct nd_cmd_pkg) 249 struct nd_cmd_pkg)