aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-08-25 21:13:10 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-25 21:13:10 -0400
commit828bf6e904eb8fc8969333568802689fbbf07a40 (patch)
tree669784db5c100f75698f2a4511d31047dd0b3d50
parentb326272010b6656210193d7ab93fa184087e8ee1 (diff)
parent286e87718103acdf85f4ed323a37e4839a8a7c05 (diff)
Merge tag 'libnvdimm-for-4.19_misc' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dave Jiang:
 "Collection of misc libnvdimm patches for 4.19 submission:

  - Adding support to read locked nvdimm capacity.

  - Change test code to make DSM failure code injection an override.

  - Add support for calculate maximum contiguous area for namespace.

  - Add support for queueing a short ARS when there is on going ARS for
    nvdimm.

  - Allow NULL to be passed in to ->direct_access() for kaddr and pfn
    params.

  - Improve smart injection support for nvdimm emulation testing.

  - Fix test code that supports for emulating controller temperature.

  - Fix hang on error before devm_memremap_pages()

  - Fix a bug that causes user memory corruption when data returned
    to user for ars_status.

  - Maintainer updates for Ross Zwisler emails and adding Jan Kara to
    fsdax"

* tag 'libnvdimm-for-4.19_misc' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm: fix ars_status output length calculation
  device-dax: avoid hang on error before devm_memremap_pages()
  tools/testing/nvdimm: improve emulation of smart injection
  filesystem-dax: Do not request kaddr and pfn when not required
  md/dm-writecache: Don't request pointer dummy_addr when not required
  dax/super: Do not request a pointer kaddr when not required
  tools/testing/nvdimm: kaddr and pfn can be NULL to ->direct_access()
  s390, dcssblk: kaddr and pfn can be NULL to ->direct_access()
  libnvdimm, pmem: kaddr and pfn can be NULL to ->direct_access()
  acpi/nfit: queue issuing of ars when an uc error notification comes in
  libnvdimm: Export max available extent
  libnvdimm: Use max contiguous area for namespace size
  MAINTAINERS: Add Jan Kara for filesystem DAX
  MAINTAINERS: update Ross Zwisler's email address
  tools/testing/nvdimm: Fix support for emulating controller temperature
  tools/testing/nvdimm: Make DSM failure code injection an override
  acpi, nfit: Prefer _DSM over _LSR for namespace label reads
  libnvdimm: Introduce locked DIMM capacity support
-rw-r--r--.mailmap1
-rw-r--r--MAINTAINERS13
-rw-r--r--drivers/acpi/nfit/core.c24
-rw-r--r--drivers/acpi/nfit/nfit.h1
-rw-r--r--drivers/dax/pmem.c12
-rw-r--r--drivers/dax/super.c3
-rw-r--r--drivers/md/dm-writecache.c3
-rw-r--r--drivers/nvdimm/bus.c4
-rw-r--r--drivers/nvdimm/dimm.c24
-rw-r--r--drivers/nvdimm/dimm_devs.c31
-rw-r--r--drivers/nvdimm/namespace_devs.c29
-rw-r--r--drivers/nvdimm/nd-core.h8
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvdimm/pmem.c7
-rw-r--r--drivers/nvdimm/region_devs.c40
-rw-r--r--drivers/s390/block/dcssblk.c8
-rw-r--r--fs/dax.c13
-rw-r--r--tools/testing/nvdimm/pmem-dax.c12
-rw-r--r--tools/testing/nvdimm/test/nfit.c126
19 files changed, 270 insertions, 90 deletions
diff --git a/.mailmap b/.mailmap
index 2a6f685bf706..285e09645b31 100644
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
159Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net> 159Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
160Rémi Denis-Courmont <rdenis@simphalempin.com> 160Rémi Denis-Courmont <rdenis@simphalempin.com>
161Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com> 161Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
162Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
162Rudolf Marek <R.Marek@sh.cvut.cz> 163Rudolf Marek <R.Marek@sh.cvut.cz>
163Rui Saraiva <rmps@joel.ist.utl.pt> 164Rui Saraiva <rmps@joel.ist.utl.pt>
164Sachin P Sant <ssant@in.ibm.com> 165Sachin P Sant <ssant@in.ibm.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index e20e7c42347b..a5b256b25905 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4364,7 +4364,8 @@ F: drivers/i2c/busses/i2c-diolan-u2c.c
4364 4364
4365FILESYSTEM DIRECT ACCESS (DAX) 4365FILESYSTEM DIRECT ACCESS (DAX)
4366M: Matthew Wilcox <mawilcox@microsoft.com> 4366M: Matthew Wilcox <mawilcox@microsoft.com>
4367M: Ross Zwisler <ross.zwisler@linux.intel.com> 4367M: Ross Zwisler <zwisler@kernel.org>
4368M: Jan Kara <jack@suse.cz>
4368L: linux-fsdevel@vger.kernel.org 4369L: linux-fsdevel@vger.kernel.org
4369S: Supported 4370S: Supported
4370F: fs/dax.c 4371F: fs/dax.c
@@ -4374,7 +4375,7 @@ F: include/trace/events/fs_dax.h
4374DEVICE DIRECT ACCESS (DAX) 4375DEVICE DIRECT ACCESS (DAX)
4375M: Dan Williams <dan.j.williams@intel.com> 4376M: Dan Williams <dan.j.williams@intel.com>
4376M: Dave Jiang <dave.jiang@intel.com> 4377M: Dave Jiang <dave.jiang@intel.com>
4377M: Ross Zwisler <ross.zwisler@linux.intel.com> 4378M: Ross Zwisler <zwisler@kernel.org>
4378M: Vishal Verma <vishal.l.verma@intel.com> 4379M: Vishal Verma <vishal.l.verma@intel.com>
4379L: linux-nvdimm@lists.01.org 4380L: linux-nvdimm@lists.01.org
4380S: Supported 4381S: Supported
@@ -8303,7 +8304,7 @@ S: Maintained
8303F: tools/lib/lockdep/ 8304F: tools/lib/lockdep/
8304 8305
8305LIBNVDIMM BLK: MMIO-APERTURE DRIVER 8306LIBNVDIMM BLK: MMIO-APERTURE DRIVER
8306M: Ross Zwisler <ross.zwisler@linux.intel.com> 8307M: Ross Zwisler <zwisler@kernel.org>
8307M: Dan Williams <dan.j.williams@intel.com> 8308M: Dan Williams <dan.j.williams@intel.com>
8308M: Vishal Verma <vishal.l.verma@intel.com> 8309M: Vishal Verma <vishal.l.verma@intel.com>
8309M: Dave Jiang <dave.jiang@intel.com> 8310M: Dave Jiang <dave.jiang@intel.com>
@@ -8316,7 +8317,7 @@ F: drivers/nvdimm/region_devs.c
8316LIBNVDIMM BTT: BLOCK TRANSLATION TABLE 8317LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
8317M: Vishal Verma <vishal.l.verma@intel.com> 8318M: Vishal Verma <vishal.l.verma@intel.com>
8318M: Dan Williams <dan.j.williams@intel.com> 8319M: Dan Williams <dan.j.williams@intel.com>
8319M: Ross Zwisler <ross.zwisler@linux.intel.com> 8320M: Ross Zwisler <zwisler@kernel.org>
8320M: Dave Jiang <dave.jiang@intel.com> 8321M: Dave Jiang <dave.jiang@intel.com>
8321L: linux-nvdimm@lists.01.org 8322L: linux-nvdimm@lists.01.org
8322Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8323Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
@@ -8324,7 +8325,7 @@ S: Supported
8324F: drivers/nvdimm/btt* 8325F: drivers/nvdimm/btt*
8325 8326
8326LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER 8327LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
8327M: Ross Zwisler <ross.zwisler@linux.intel.com> 8328M: Ross Zwisler <zwisler@kernel.org>
8328M: Dan Williams <dan.j.williams@intel.com> 8329M: Dan Williams <dan.j.williams@intel.com>
8329M: Vishal Verma <vishal.l.verma@intel.com> 8330M: Vishal Verma <vishal.l.verma@intel.com>
8330M: Dave Jiang <dave.jiang@intel.com> 8331M: Dave Jiang <dave.jiang@intel.com>
@@ -8343,7 +8344,7 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
8343 8344
8344LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM 8345LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
8345M: Dan Williams <dan.j.williams@intel.com> 8346M: Dan Williams <dan.j.williams@intel.com>
8346M: Ross Zwisler <ross.zwisler@linux.intel.com> 8347M: Ross Zwisler <zwisler@kernel.org>
8347M: Vishal Verma <vishal.l.verma@intel.com> 8348M: Vishal Verma <vishal.l.verma@intel.com>
8348M: Dave Jiang <dave.jiang@intel.com> 8349M: Dave Jiang <dave.jiang@intel.com>
8349L: linux-nvdimm@lists.01.org 8350L: linux-nvdimm@lists.01.org
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 7c479002e798..b072cfc5f20e 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1699,7 +1699,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1699{ 1699{
1700 struct acpi_device *adev, *adev_dimm; 1700 struct acpi_device *adev, *adev_dimm;
1701 struct device *dev = acpi_desc->dev; 1701 struct device *dev = acpi_desc->dev;
1702 unsigned long dsm_mask; 1702 unsigned long dsm_mask, label_mask;
1703 const guid_t *guid; 1703 const guid_t *guid;
1704 int i; 1704 int i;
1705 int family = -1; 1705 int family = -1;
@@ -1771,6 +1771,16 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1771 1ULL << i)) 1771 1ULL << i))
1772 set_bit(i, &nfit_mem->dsm_mask); 1772 set_bit(i, &nfit_mem->dsm_mask);
1773 1773
1774 /*
1775 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1776 * due to their better semantics handling locked capacity.
1777 */
1778 label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1779 | 1 << ND_CMD_SET_CONFIG_DATA;
1780 if (family == NVDIMM_FAMILY_INTEL
1781 && (dsm_mask & label_mask) == label_mask)
1782 return 0;
1783
1774 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1784 if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1775 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1785 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1776 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1786 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
@@ -2559,7 +2569,12 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2559 test_bit(ARS_SHORT, &nfit_spa->ars_state) 2569 test_bit(ARS_SHORT, &nfit_spa->ars_state)
2560 ? "short" : "long"); 2570 ? "short" : "long");
2561 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2571 clear_bit(ARS_SHORT, &nfit_spa->ars_state);
2562 set_bit(ARS_DONE, &nfit_spa->ars_state); 2572 if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
2573 set_bit(ARS_SHORT, &nfit_spa->ars_state);
2574 set_bit(ARS_REQ, &nfit_spa->ars_state);
2575 dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
2576 } else
2577 set_bit(ARS_DONE, &nfit_spa->ars_state);
2563} 2578}
2564 2579
2565static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2580static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -3256,9 +3271,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
3256 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3271 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3257 continue; 3272 continue;
3258 3273
3259 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) 3274 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
3260 busy++; 3275 busy++;
3261 else { 3276 set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
3277 } else {
3262 if (test_bit(ARS_SHORT, &flags)) 3278 if (test_bit(ARS_SHORT, &flags))
3263 set_bit(ARS_SHORT, &nfit_spa->ars_state); 3279 set_bit(ARS_SHORT, &nfit_spa->ars_state);
3264 scheduled++; 3280 scheduled++;
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index a97ff42fe311..d1274ea2d251 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -119,6 +119,7 @@ enum nfit_dimm_notifiers {
119 119
120enum nfit_ars_state { 120enum nfit_ars_state {
121 ARS_REQ, 121 ARS_REQ,
122 ARS_REQ_REDO,
122 ARS_DONE, 123 ARS_DONE,
123 ARS_SHORT, 124 ARS_SHORT,
124 ARS_FAILED, 125 ARS_FAILED,
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index fd49b24fd6af..99e2aace8078 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
105 if (rc) 105 if (rc)
106 return rc; 106 return rc;
107 107
108 rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit, 108 rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
109 &dax_pmem->ref); 109 if (rc) {
110 if (rc) 110 percpu_ref_exit(&dax_pmem->ref);
111 return rc; 111 return rc;
112 }
112 113
113 dax_pmem->pgmap.ref = &dax_pmem->ref; 114 dax_pmem->pgmap.ref = &dax_pmem->ref;
114 addr = devm_memremap_pages(dev, &dax_pmem->pgmap); 115 addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
115 if (IS_ERR(addr)) 116 if (IS_ERR(addr)) {
117 devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
118 percpu_ref_exit(&dax_pmem->ref);
116 return PTR_ERR(addr); 119 return PTR_ERR(addr);
120 }
117 121
118 rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, 122 rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
119 &dax_pmem->ref); 123 &dax_pmem->ref);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 45276abf03aa..6e928f37d084 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -89,7 +89,6 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
89 struct request_queue *q; 89 struct request_queue *q;
90 pgoff_t pgoff; 90 pgoff_t pgoff;
91 int err, id; 91 int err, id;
92 void *kaddr;
93 pfn_t pfn; 92 pfn_t pfn;
94 long len; 93 long len;
95 char buf[BDEVNAME_SIZE]; 94 char buf[BDEVNAME_SIZE];
@@ -122,7 +121,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
122 } 121 }
123 122
124 id = dax_read_lock(); 123 id = dax_read_lock();
125 len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); 124 len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
126 dax_read_unlock(id); 125 dax_read_unlock(id);
127 126
128 put_dax(dax_dev); 127 put_dax(dax_dev);
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 3a28a68f184c..5f1f80d424dd 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -268,9 +268,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
268 i = 0; 268 i = 0;
269 do { 269 do {
270 long daa; 270 long daa;
271 void *dummy_addr;
272 daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, 271 daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
273 &dummy_addr, &pfn); 272 NULL, &pfn);
274 if (daa <= 0) { 273 if (daa <= 0) {
275 r = daa ? daa : -EINVAL; 274 r = daa ? daa : -EINVAL;
276 goto err3; 275 goto err3;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 27902a8799b1..8aae6dcc839f 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -812,9 +812,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
812 * overshoots the remainder by 4 bytes, assume it was 812 * overshoots the remainder by 4 bytes, assume it was
813 * including 'status'. 813 * including 'status'.
814 */ 814 */
815 if (out_field[1] - 8 == remainder) 815 if (out_field[1] - 4 == remainder)
816 return remainder; 816 return remainder;
817 return out_field[1] - 4; 817 return out_field[1] - 8;
818 } else if (cmd == ND_CMD_CALL) { 818 } else if (cmd == ND_CMD_CALL) {
819 struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field; 819 struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
820 820
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 233907889f96..6c8fb7590838 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -34,6 +34,9 @@ static int nvdimm_probe(struct device *dev)
34 return rc; 34 return rc;
35 } 35 }
36 36
37 /* reset locked, to be validated below... */
38 nvdimm_clear_locked(dev);
39
37 ndd = kzalloc(sizeof(*ndd), GFP_KERNEL); 40 ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
38 if (!ndd) 41 if (!ndd)
39 return -ENOMEM; 42 return -ENOMEM;
@@ -48,12 +51,30 @@ static int nvdimm_probe(struct device *dev)
48 get_device(dev); 51 get_device(dev);
49 kref_init(&ndd->kref); 52 kref_init(&ndd->kref);
50 53
54 /*
55 * EACCES failures reading the namespace label-area-properties
56 * are interpreted as the DIMM capacity being locked but the
57 * namespace labels themselves being accessible.
58 */
51 rc = nvdimm_init_nsarea(ndd); 59 rc = nvdimm_init_nsarea(ndd);
52 if (rc == -EACCES) 60 if (rc == -EACCES) {
61 /*
62 * See nvdimm_namespace_common_probe() where we fail to
63 * allow namespaces to probe while the DIMM is locked,
64 * but we do allow for namespace enumeration.
65 */
53 nvdimm_set_locked(dev); 66 nvdimm_set_locked(dev);
67 rc = 0;
68 }
54 if (rc) 69 if (rc)
55 goto err; 70 goto err;
56 71
72 /*
73 * EACCES failures reading the namespace label-data are
74 * interpreted as the label area being locked in addition to the
75 * DIMM capacity. We fail the dimm probe to prevent regions from
76 * attempting to parse the label area.
77 */
57 rc = nvdimm_init_config_data(ndd); 78 rc = nvdimm_init_config_data(ndd);
58 if (rc == -EACCES) 79 if (rc == -EACCES)
59 nvdimm_set_locked(dev); 80 nvdimm_set_locked(dev);
@@ -72,7 +93,6 @@ static int nvdimm_probe(struct device *dev)
72 if (rc == 0) 93 if (rc == 0)
73 nvdimm_set_aliasing(dev); 94 nvdimm_set_aliasing(dev);
74 } 95 }
75 nvdimm_clear_locked(dev);
76 nvdimm_bus_unlock(dev); 96 nvdimm_bus_unlock(dev);
77 97
78 if (rc) 98 if (rc)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 8d348b22ba45..863cabc35215 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -537,6 +537,37 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
537} 537}
538 538
539/** 539/**
540 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
541 * contiguous unallocated dpa range.
542 * @nd_region: constrain available space check to this reference region
543 * @nd_mapping: container of dpa-resource-root + labels
544 */
545resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
546 struct nd_mapping *nd_mapping)
547{
548 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
549 struct nvdimm_bus *nvdimm_bus;
550 resource_size_t max = 0;
551 struct resource *res;
552
553 /* if a dimm is disabled the available capacity is zero */
554 if (!ndd)
555 return 0;
556
557 nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
558 if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
559 return 0;
560 for_each_dpa_resource(ndd, res) {
561 if (strcmp(res->name, "pmem-reserve") != 0)
562 continue;
563 if (resource_size(res) > max)
564 max = resource_size(res);
565 }
566 release_free_pmem(nvdimm_bus, nd_mapping);
567 return max;
568}
569
570/**
540 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa 571 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
541 * @nd_mapping: container of dpa-resource-root + labels 572 * @nd_mapping: container of dpa-resource-root + labels
542 * @nd_region: constrain available space check to this reference region 573 * @nd_region: constrain available space check to this reference region
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 28afdd668905..4a4266250c28 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -799,7 +799,7 @@ static int merge_dpa(struct nd_region *nd_region,
799 return 0; 799 return 0;
800} 800}
801 801
802static int __reserve_free_pmem(struct device *dev, void *data) 802int __reserve_free_pmem(struct device *dev, void *data)
803{ 803{
804 struct nvdimm *nvdimm = data; 804 struct nvdimm *nvdimm = data;
805 struct nd_region *nd_region; 805 struct nd_region *nd_region;
@@ -836,7 +836,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
836 return 0; 836 return 0;
837} 837}
838 838
839static void release_free_pmem(struct nvdimm_bus *nvdimm_bus, 839void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
840 struct nd_mapping *nd_mapping) 840 struct nd_mapping *nd_mapping)
841{ 841{
842 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 842 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1032,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1032 1032
1033 allocated += nvdimm_allocated_dpa(ndd, &label_id); 1033 allocated += nvdimm_allocated_dpa(ndd, &label_id);
1034 } 1034 }
1035 available = nd_region_available_dpa(nd_region); 1035 available = nd_region_allocatable_dpa(nd_region);
1036 1036
1037 if (val > available + allocated) 1037 if (val > available + allocated)
1038 return -ENOSPC; 1038 return -ENOSPC;
@@ -1144,6 +1144,26 @@ resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
1144} 1144}
1145EXPORT_SYMBOL(nvdimm_namespace_capacity); 1145EXPORT_SYMBOL(nvdimm_namespace_capacity);
1146 1146
1147bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
1148{
1149 int i;
1150 bool locked = false;
1151 struct device *dev = &ndns->dev;
1152 struct nd_region *nd_region = to_nd_region(dev->parent);
1153
1154 for (i = 0; i < nd_region->ndr_mappings; i++) {
1155 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1156 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1157
1158 if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
1159 dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
1160 locked = true;
1161 }
1162 }
1163 return locked;
1164}
1165EXPORT_SYMBOL(nvdimm_namespace_locked);
1166
1147static ssize_t size_show(struct device *dev, 1167static ssize_t size_show(struct device *dev,
1148 struct device_attribute *attr, char *buf) 1168 struct device_attribute *attr, char *buf)
1149{ 1169{
@@ -1695,6 +1715,9 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1695 } 1715 }
1696 } 1716 }
1697 1717
1718 if (nvdimm_namespace_locked(ndns))
1719 return ERR_PTR(-EACCES);
1720
1698 size = nvdimm_namespace_capacity(ndns); 1721 size = nvdimm_namespace_capacity(ndns);
1699 if (size < ND_MIN_NAMESPACE_SIZE) { 1722 if (size < ND_MIN_NAMESPACE_SIZE) {
1700 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n", 1723 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 79274ead54fb..ac68072fb8cd 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -100,6 +100,14 @@ struct nd_region;
100struct nvdimm_drvdata; 100struct nvdimm_drvdata;
101struct nd_mapping; 101struct nd_mapping;
102void nd_mapping_free_labels(struct nd_mapping *nd_mapping); 102void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
103
104int __reserve_free_pmem(struct device *dev, void *data);
105void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
106 struct nd_mapping *nd_mapping);
107
108resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
109 struct nd_mapping *nd_mapping);
110resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
103resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, 111resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
104 struct nd_mapping *nd_mapping, resource_size_t *overlap); 112 struct nd_mapping *nd_mapping, resource_size_t *overlap);
105resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); 113resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 6ee7fd7e4bbd..98317e7ce5b5 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -357,6 +357,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
357 struct nd_label_id *label_id, resource_size_t start, 357 struct nd_label_id *label_id, resource_size_t start,
358 resource_size_t n); 358 resource_size_t n);
359resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns); 359resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
360bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
360struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev); 361struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
361int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); 362int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
362int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt); 363int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index dd17acd8fe68..c23649867696 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -226,8 +226,11 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
226 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512, 226 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
227 PFN_PHYS(nr_pages)))) 227 PFN_PHYS(nr_pages))))
228 return -EIO; 228 return -EIO;
229 *kaddr = pmem->virt_addr + offset; 229
230 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); 230 if (kaddr)
231 *kaddr = pmem->virt_addr + offset;
232 if (pfn)
233 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
231 234
232 /* 235 /*
233 * If badblocks are present, limit known good range to the 236 * If badblocks are present, limit known good range to the
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ec3543b83330..fa37afcd43ff 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -389,6 +389,30 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
389 return available; 389 return available;
390} 390}
391 391
392resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
393{
394 resource_size_t available = 0;
395 int i;
396
397 if (is_memory(&nd_region->dev))
398 available = PHYS_ADDR_MAX;
399
400 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
401 for (i = 0; i < nd_region->ndr_mappings; i++) {
402 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
403
404 if (is_memory(&nd_region->dev))
405 available = min(available,
406 nd_pmem_max_contiguous_dpa(nd_region,
407 nd_mapping));
408 else if (is_nd_blk(&nd_region->dev))
409 available += nd_blk_available_dpa(nd_region);
410 }
411 if (is_memory(&nd_region->dev))
412 return available * nd_region->ndr_mappings;
413 return available;
414}
415
392static ssize_t available_size_show(struct device *dev, 416static ssize_t available_size_show(struct device *dev,
393 struct device_attribute *attr, char *buf) 417 struct device_attribute *attr, char *buf)
394{ 418{
@@ -410,6 +434,21 @@ static ssize_t available_size_show(struct device *dev,
410} 434}
411static DEVICE_ATTR_RO(available_size); 435static DEVICE_ATTR_RO(available_size);
412 436
437static ssize_t max_available_extent_show(struct device *dev,
438 struct device_attribute *attr, char *buf)
439{
440 struct nd_region *nd_region = to_nd_region(dev);
441 unsigned long long available = 0;
442
443 nvdimm_bus_lock(dev);
444 wait_nvdimm_bus_probe_idle(dev);
445 available = nd_region_allocatable_dpa(nd_region);
446 nvdimm_bus_unlock(dev);
447
448 return sprintf(buf, "%llu\n", available);
449}
450static DEVICE_ATTR_RO(max_available_extent);
451
413static ssize_t init_namespaces_show(struct device *dev, 452static ssize_t init_namespaces_show(struct device *dev,
414 struct device_attribute *attr, char *buf) 453 struct device_attribute *attr, char *buf)
415{ 454{
@@ -561,6 +600,7 @@ static struct attribute *nd_region_attributes[] = {
561 &dev_attr_read_only.attr, 600 &dev_attr_read_only.attr,
562 &dev_attr_set_cookie.attr, 601 &dev_attr_set_cookie.attr,
563 &dev_attr_available_size.attr, 602 &dev_attr_available_size.attr,
603 &dev_attr_max_available_extent.attr,
564 &dev_attr_namespace_seed.attr, 604 &dev_attr_namespace_seed.attr,
565 &dev_attr_init_namespaces.attr, 605 &dev_attr_init_namespaces.attr,
566 &dev_attr_badblocks.attr, 606 &dev_attr_badblocks.attr,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ed607288e696..23e526cda5c1 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -922,9 +922,11 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
922 unsigned long dev_sz; 922 unsigned long dev_sz;
923 923
924 dev_sz = dev_info->end - dev_info->start + 1; 924 dev_sz = dev_info->end - dev_info->start + 1;
925 *kaddr = (void *) dev_info->start + offset; 925 if (kaddr)
926 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 926 *kaddr = (void *) dev_info->start + offset;
927 PFN_DEV|PFN_SPECIAL); 927 if (pfn)
928 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
929 PFN_DEV|PFN_SPECIAL);
928 930
929 return (dev_sz - offset) / PAGE_SIZE; 931 return (dev_sz - offset) / PAGE_SIZE;
930} 932}
diff --git a/fs/dax.c b/fs/dax.c
index 897b51e41d8f..f76724139f80 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -655,7 +655,6 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
655{ 655{
656 void *vto, *kaddr; 656 void *vto, *kaddr;
657 pgoff_t pgoff; 657 pgoff_t pgoff;
658 pfn_t pfn;
659 long rc; 658 long rc;
660 int id; 659 int id;
661 660
@@ -664,7 +663,7 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
664 return rc; 663 return rc;
665 664
666 id = dax_read_lock(); 665 id = dax_read_lock();
667 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn); 666 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
668 if (rc < 0) { 667 if (rc < 0) {
669 dax_read_unlock(id); 668 dax_read_unlock(id);
670 return rc; 669 return rc;
@@ -975,7 +974,6 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
975{ 974{
976 const sector_t sector = dax_iomap_sector(iomap, pos); 975 const sector_t sector = dax_iomap_sector(iomap, pos);
977 pgoff_t pgoff; 976 pgoff_t pgoff;
978 void *kaddr;
979 int id, rc; 977 int id, rc;
980 long length; 978 long length;
981 979
@@ -984,7 +982,7 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
984 return rc; 982 return rc;
985 id = dax_read_lock(); 983 id = dax_read_lock();
986 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 984 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
987 &kaddr, pfnp); 985 NULL, pfnp);
988 if (length < 0) { 986 if (length < 0) {
989 rc = length; 987 rc = length;
990 goto out; 988 goto out;
@@ -1060,15 +1058,13 @@ int __dax_zero_page_range(struct block_device *bdev,
1060 pgoff_t pgoff; 1058 pgoff_t pgoff;
1061 long rc, id; 1059 long rc, id;
1062 void *kaddr; 1060 void *kaddr;
1063 pfn_t pfn;
1064 1061
1065 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); 1062 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1066 if (rc) 1063 if (rc)
1067 return rc; 1064 return rc;
1068 1065
1069 id = dax_read_lock(); 1066 id = dax_read_lock();
1070 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, 1067 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1071 &pfn);
1072 if (rc < 0) { 1068 if (rc < 0) {
1073 dax_read_unlock(id); 1069 dax_read_unlock(id);
1074 return rc; 1070 return rc;
@@ -1124,7 +1120,6 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1124 ssize_t map_len; 1120 ssize_t map_len;
1125 pgoff_t pgoff; 1121 pgoff_t pgoff;
1126 void *kaddr; 1122 void *kaddr;
1127 pfn_t pfn;
1128 1123
1129 if (fatal_signal_pending(current)) { 1124 if (fatal_signal_pending(current)) {
1130 ret = -EINTR; 1125 ret = -EINTR;
@@ -1136,7 +1131,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1136 break; 1131 break;
1137 1132
1138 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 1133 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1139 &kaddr, &pfn); 1134 &kaddr, NULL);
1140 if (map_len < 0) { 1135 if (map_len < 0) {
1141 ret = map_len; 1136 ret = map_len;
1142 break; 1137 break;
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index b53596ad601b..2e7fd8227969 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
31 if (get_nfit_res(pmem->phys_addr + offset)) { 31 if (get_nfit_res(pmem->phys_addr + offset)) {
32 struct page *page; 32 struct page *page;
33 33
34 *kaddr = pmem->virt_addr + offset; 34 if (kaddr)
35 *kaddr = pmem->virt_addr + offset;
35 page = vmalloc_to_page(pmem->virt_addr + offset); 36 page = vmalloc_to_page(pmem->virt_addr + offset);
36 *pfn = page_to_pfn_t(page); 37 if (pfn)
38 *pfn = page_to_pfn_t(page);
37 pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n", 39 pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
38 __func__, pmem, pgoff, page_to_pfn(page)); 40 __func__, pmem, pgoff, page_to_pfn(page));
39 41
40 return 1; 42 return 1;
41 } 43 }
42 44
43 *kaddr = pmem->virt_addr + offset; 45 if (kaddr)
44 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); 46 *kaddr = pmem->virt_addr + offset;
47 if (pfn)
48 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
45 49
46 /* 50 /*
47 * If badblocks are present, limit known good range to the 51 * If badblocks are present, limit known good range to the
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index e2926f72a821..cffc2c5a778d 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -142,6 +142,28 @@ static u32 handle[] = {
142static unsigned long dimm_fail_cmd_flags[NUM_DCR]; 142static unsigned long dimm_fail_cmd_flags[NUM_DCR];
143static int dimm_fail_cmd_code[NUM_DCR]; 143static int dimm_fail_cmd_code[NUM_DCR];
144 144
145static const struct nd_intel_smart smart_def = {
146 .flags = ND_INTEL_SMART_HEALTH_VALID
147 | ND_INTEL_SMART_SPARES_VALID
148 | ND_INTEL_SMART_ALARM_VALID
149 | ND_INTEL_SMART_USED_VALID
150 | ND_INTEL_SMART_SHUTDOWN_VALID
151 | ND_INTEL_SMART_MTEMP_VALID
152 | ND_INTEL_SMART_CTEMP_VALID,
153 .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
154 .media_temperature = 23 * 16,
155 .ctrl_temperature = 25 * 16,
156 .pmic_temperature = 40 * 16,
157 .spares = 75,
158 .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
159 | ND_INTEL_SMART_TEMP_TRIP,
160 .ait_status = 1,
161 .life_used = 5,
162 .shutdown_state = 0,
163 .vendor_size = 0,
164 .shutdown_count = 100,
165};
166
145struct nfit_test_fw { 167struct nfit_test_fw {
146 enum intel_fw_update_state state; 168 enum intel_fw_update_state state;
147 u32 context; 169 u32 context;
@@ -752,15 +774,30 @@ static int nfit_test_cmd_smart_inject(
752 if (buf_len != sizeof(*inj)) 774 if (buf_len != sizeof(*inj))
753 return -EINVAL; 775 return -EINVAL;
754 776
755 if (inj->mtemp_enable) 777 if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
756 smart->media_temperature = inj->media_temperature; 778 if (inj->mtemp_enable)
757 if (inj->spare_enable) 779 smart->media_temperature = inj->media_temperature;
758 smart->spares = inj->spares; 780 else
759 if (inj->fatal_enable) 781 smart->media_temperature = smart_def.media_temperature;
760 smart->health = ND_INTEL_SMART_FATAL_HEALTH; 782 }
761 if (inj->unsafe_shutdown_enable) { 783 if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
762 smart->shutdown_state = 1; 784 if (inj->spare_enable)
763 smart->shutdown_count++; 785 smart->spares = inj->spares;
786 else
787 smart->spares = smart_def.spares;
788 }
789 if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
790 if (inj->fatal_enable)
791 smart->health = ND_INTEL_SMART_FATAL_HEALTH;
792 else
793 smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
794 }
795 if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
796 if (inj->unsafe_shutdown_enable) {
797 smart->shutdown_state = 1;
798 smart->shutdown_count++;
799 } else
800 smart->shutdown_state = 0;
764 } 801 }
765 inj->status = 0; 802 inj->status = 0;
766 smart_notify(bus_dev, dimm_dev, smart, thresh); 803 smart_notify(bus_dev, dimm_dev, smart, thresh);
@@ -884,6 +921,16 @@ static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
884 return 0; 921 return 0;
885} 922}
886 923
924static int override_return_code(int dimm, unsigned int func, int rc)
925{
926 if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
927 if (dimm_fail_cmd_code[dimm])
928 return dimm_fail_cmd_code[dimm];
929 return -EIO;
930 }
931 return rc;
932}
933
887static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) 934static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
888{ 935{
889 int i; 936 int i;
@@ -894,13 +941,6 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
894 break; 941 break;
895 if (i >= ARRAY_SIZE(handle)) 942 if (i >= ARRAY_SIZE(handle))
896 return -ENXIO; 943 return -ENXIO;
897
898 if ((1 << func) & dimm_fail_cmd_flags[i]) {
899 if (dimm_fail_cmd_code[i])
900 return dimm_fail_cmd_code[i];
901 return -EIO;
902 }
903
904 return i; 944 return i;
905} 945}
906 946
@@ -939,48 +979,59 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
939 979
940 switch (func) { 980 switch (func) {
941 case ND_INTEL_ENABLE_LSS_STATUS: 981 case ND_INTEL_ENABLE_LSS_STATUS:
942 return nd_intel_test_cmd_set_lss_status(t, 982 rc = nd_intel_test_cmd_set_lss_status(t,
943 buf, buf_len); 983 buf, buf_len);
984 break;
944 case ND_INTEL_FW_GET_INFO: 985 case ND_INTEL_FW_GET_INFO:
945 return nd_intel_test_get_fw_info(t, buf, 986 rc = nd_intel_test_get_fw_info(t, buf,
946 buf_len, i - t->dcr_idx); 987 buf_len, i - t->dcr_idx);
988 break;
947 case ND_INTEL_FW_START_UPDATE: 989 case ND_INTEL_FW_START_UPDATE:
948 return nd_intel_test_start_update(t, buf, 990 rc = nd_intel_test_start_update(t, buf,
949 buf_len, i - t->dcr_idx); 991 buf_len, i - t->dcr_idx);
992 break;
950 case ND_INTEL_FW_SEND_DATA: 993 case ND_INTEL_FW_SEND_DATA:
951 return nd_intel_test_send_data(t, buf, 994 rc = nd_intel_test_send_data(t, buf,
952 buf_len, i - t->dcr_idx); 995 buf_len, i - t->dcr_idx);
996 break;
953 case ND_INTEL_FW_FINISH_UPDATE: 997 case ND_INTEL_FW_FINISH_UPDATE:
954 return nd_intel_test_finish_fw(t, buf, 998 rc = nd_intel_test_finish_fw(t, buf,
955 buf_len, i - t->dcr_idx); 999 buf_len, i - t->dcr_idx);
1000 break;
956 case ND_INTEL_FW_FINISH_QUERY: 1001 case ND_INTEL_FW_FINISH_QUERY:
957 return nd_intel_test_finish_query(t, buf, 1002 rc = nd_intel_test_finish_query(t, buf,
958 buf_len, i - t->dcr_idx); 1003 buf_len, i - t->dcr_idx);
1004 break;
959 case ND_INTEL_SMART: 1005 case ND_INTEL_SMART:
960 return nfit_test_cmd_smart(buf, buf_len, 1006 rc = nfit_test_cmd_smart(buf, buf_len,
961 &t->smart[i - t->dcr_idx]); 1007 &t->smart[i - t->dcr_idx]);
1008 break;
962 case ND_INTEL_SMART_THRESHOLD: 1009 case ND_INTEL_SMART_THRESHOLD:
963 return nfit_test_cmd_smart_threshold(buf, 1010 rc = nfit_test_cmd_smart_threshold(buf,
964 buf_len, 1011 buf_len,
965 &t->smart_threshold[i - 1012 &t->smart_threshold[i -
966 t->dcr_idx]); 1013 t->dcr_idx]);
1014 break;
967 case ND_INTEL_SMART_SET_THRESHOLD: 1015 case ND_INTEL_SMART_SET_THRESHOLD:
968 return nfit_test_cmd_smart_set_threshold(buf, 1016 rc = nfit_test_cmd_smart_set_threshold(buf,
969 buf_len, 1017 buf_len,
970 &t->smart_threshold[i - 1018 &t->smart_threshold[i -
971 t->dcr_idx], 1019 t->dcr_idx],
972 &t->smart[i - t->dcr_idx], 1020 &t->smart[i - t->dcr_idx],
973 &t->pdev.dev, t->dimm_dev[i]); 1021 &t->pdev.dev, t->dimm_dev[i]);
1022 break;
974 case ND_INTEL_SMART_INJECT: 1023 case ND_INTEL_SMART_INJECT:
975 return nfit_test_cmd_smart_inject(buf, 1024 rc = nfit_test_cmd_smart_inject(buf,
976 buf_len, 1025 buf_len,
977 &t->smart_threshold[i - 1026 &t->smart_threshold[i -
978 t->dcr_idx], 1027 t->dcr_idx],
979 &t->smart[i - t->dcr_idx], 1028 &t->smart[i - t->dcr_idx],
980 &t->pdev.dev, t->dimm_dev[i]); 1029 &t->pdev.dev, t->dimm_dev[i]);
1030 break;
981 default: 1031 default:
982 return -ENOTTY; 1032 return -ENOTTY;
983 } 1033 }
1034 return override_return_code(i, func, rc);
984 } 1035 }
985 1036
986 if (!test_bit(cmd, &cmd_mask) 1037 if (!test_bit(cmd, &cmd_mask)
@@ -1006,6 +1057,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
1006 default: 1057 default:
1007 return -ENOTTY; 1058 return -ENOTTY;
1008 } 1059 }
1060 return override_return_code(i, func, rc);
1009 } else { 1061 } else {
1010 struct ars_state *ars_state = &t->ars_state; 1062 struct ars_state *ars_state = &t->ars_state;
1011 struct nd_cmd_pkg *call_pkg = buf; 1063 struct nd_cmd_pkg *call_pkg = buf;
@@ -1302,29 +1354,9 @@ static void smart_init(struct nfit_test *t)
1302 .ctrl_temperature = 30 * 16, 1354 .ctrl_temperature = 30 * 16,
1303 .spares = 5, 1355 .spares = 5,
1304 }; 1356 };
1305 const struct nd_intel_smart smart_data = {
1306 .flags = ND_INTEL_SMART_HEALTH_VALID
1307 | ND_INTEL_SMART_SPARES_VALID
1308 | ND_INTEL_SMART_ALARM_VALID
1309 | ND_INTEL_SMART_USED_VALID
1310 | ND_INTEL_SMART_SHUTDOWN_VALID
1311 | ND_INTEL_SMART_MTEMP_VALID,
1312 .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
1313 .media_temperature = 23 * 16,
1314 .ctrl_temperature = 25 * 16,
1315 .pmic_temperature = 40 * 16,
1316 .spares = 75,
1317 .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
1318 | ND_INTEL_SMART_TEMP_TRIP,
1319 .ait_status = 1,
1320 .life_used = 5,
1321 .shutdown_state = 0,
1322 .vendor_size = 0,
1323 .shutdown_count = 100,
1324 };
1325 1357
1326 for (i = 0; i < t->num_dcr; i++) { 1358 for (i = 0; i < t->num_dcr; i++) {
1327 memcpy(&t->smart[i], &smart_data, sizeof(smart_data)); 1359 memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
1328 memcpy(&t->smart_threshold[i], &smart_t_data, 1360 memcpy(&t->smart_threshold[i], &smart_t_data,
1329 sizeof(smart_t_data)); 1361 sizeof(smart_t_data));
1330 } 1362 }