aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-08-25 21:13:10 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-25 21:13:10 -0400
commit828bf6e904eb8fc8969333568802689fbbf07a40 (patch)
tree669784db5c100f75698f2a4511d31047dd0b3d50 /drivers
parentb326272010b6656210193d7ab93fa184087e8ee1 (diff)
parent286e87718103acdf85f4ed323a37e4839a8a7c05 (diff)
Merge tag 'libnvdimm-for-4.19_misc' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dave Jiang: "Collection of misc libnvdimm patches for 4.19 submission: - Adding support to read locked nvdimm capacity. - Change test code to make DSM failure code injection an override. - Add support for calculating the maximum contiguous area for a namespace. - Add support for queueing a short ARS when there is an ongoing ARS for nvdimm. - Allow NULL to be passed in to ->direct_access() for kaddr and pfn params. - Improve smart injection support for nvdimm emulation testing. - Fix test code that supports emulating controller temperature. - Fix hang on error before devm_memremap_pages() - Fix a bug that causes user memory corruption when data is returned to the user for ars_status. - Maintainer updates for Ross Zwisler emails and adding Jan Kara to fsdax" * tag 'libnvdimm-for-4.19_misc' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm: libnvdimm: fix ars_status output length calculation device-dax: avoid hang on error before devm_memremap_pages() tools/testing/nvdimm: improve emulation of smart injection filesystem-dax: Do not request kaddr and pfn when not required md/dm-writecache: Don't request pointer dummy_addr when not required dax/super: Do not request a pointer kaddr when not required tools/testing/nvdimm: kaddr and pfn can be NULL to ->direct_access() s390, dcssblk: kaddr and pfn can be NULL to ->direct_access() libnvdimm, pmem: kaddr and pfn can be NULL to ->direct_access() acpi/nfit: queue issuing of ars when an uc error notification comes in libnvdimm: Export max available extent libnvdimm: Use max contiguous area for namespace size MAINTAINERS: Add Jan Kara for filesystem DAX MAINTAINERS: update Ross Zwisler's email address tools/testing/nvdimm: Fix support for emulating controller temperature tools/testing/nvdimm: Make DSM failure code injection an override acpi, nfit: Prefer _DSM over _LSR for namespace label reads libnvdimm: Introduce locked DIMM capacity support
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/nfit/core.c24
-rw-r--r--drivers/acpi/nfit/nfit.h1
-rw-r--r--drivers/dax/pmem.c12
-rw-r--r--drivers/dax/super.c3
-rw-r--r--drivers/md/dm-writecache.c3
-rw-r--r--drivers/nvdimm/bus.c4
-rw-r--r--drivers/nvdimm/dimm.c24
-rw-r--r--drivers/nvdimm/dimm_devs.c31
-rw-r--r--drivers/nvdimm/namespace_devs.c29
-rw-r--r--drivers/nvdimm/nd-core.h8
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvdimm/pmem.c7
-rw-r--r--drivers/nvdimm/region_devs.c40
-rw-r--r--drivers/s390/block/dcssblk.c8
14 files changed, 171 insertions, 24 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 7c479002e798..b072cfc5f20e 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1699,7 +1699,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1699{ 1699{
1700 struct acpi_device *adev, *adev_dimm; 1700 struct acpi_device *adev, *adev_dimm;
1701 struct device *dev = acpi_desc->dev; 1701 struct device *dev = acpi_desc->dev;
1702 unsigned long dsm_mask; 1702 unsigned long dsm_mask, label_mask;
1703 const guid_t *guid; 1703 const guid_t *guid;
1704 int i; 1704 int i;
1705 int family = -1; 1705 int family = -1;
@@ -1771,6 +1771,16 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1771 1ULL << i)) 1771 1ULL << i))
1772 set_bit(i, &nfit_mem->dsm_mask); 1772 set_bit(i, &nfit_mem->dsm_mask);
1773 1773
1774 /*
1775 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1776 * due to their better semantics handling locked capacity.
1777 */
1778 label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1779 | 1 << ND_CMD_SET_CONFIG_DATA;
1780 if (family == NVDIMM_FAMILY_INTEL
1781 && (dsm_mask & label_mask) == label_mask)
1782 return 0;
1783
1774 if (acpi_nvdimm_has_method(adev_dimm, "_LSI") 1784 if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1775 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { 1785 && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1776 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); 1786 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
@@ -2559,7 +2569,12 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2559 test_bit(ARS_SHORT, &nfit_spa->ars_state) 2569 test_bit(ARS_SHORT, &nfit_spa->ars_state)
2560 ? "short" : "long"); 2570 ? "short" : "long");
2561 clear_bit(ARS_SHORT, &nfit_spa->ars_state); 2571 clear_bit(ARS_SHORT, &nfit_spa->ars_state);
2562 set_bit(ARS_DONE, &nfit_spa->ars_state); 2572 if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
2573 set_bit(ARS_SHORT, &nfit_spa->ars_state);
2574 set_bit(ARS_REQ, &nfit_spa->ars_state);
2575 dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
2576 } else
2577 set_bit(ARS_DONE, &nfit_spa->ars_state);
2563} 2578}
2564 2579
2565static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) 2580static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -3256,9 +3271,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
3256 if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) 3271 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3257 continue; 3272 continue;
3258 3273
3259 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) 3274 if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
3260 busy++; 3275 busy++;
3261 else { 3276 set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
3277 } else {
3262 if (test_bit(ARS_SHORT, &flags)) 3278 if (test_bit(ARS_SHORT, &flags))
3263 set_bit(ARS_SHORT, &nfit_spa->ars_state); 3279 set_bit(ARS_SHORT, &nfit_spa->ars_state);
3264 scheduled++; 3280 scheduled++;
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index a97ff42fe311..d1274ea2d251 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -119,6 +119,7 @@ enum nfit_dimm_notifiers {
119 119
120enum nfit_ars_state { 120enum nfit_ars_state {
121 ARS_REQ, 121 ARS_REQ,
122 ARS_REQ_REDO,
122 ARS_DONE, 123 ARS_DONE,
123 ARS_SHORT, 124 ARS_SHORT,
124 ARS_FAILED, 125 ARS_FAILED,
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index fd49b24fd6af..99e2aace8078 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
105 if (rc) 105 if (rc)
106 return rc; 106 return rc;
107 107
108 rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit, 108 rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
109 &dax_pmem->ref); 109 if (rc) {
110 if (rc) 110 percpu_ref_exit(&dax_pmem->ref);
111 return rc; 111 return rc;
112 }
112 113
113 dax_pmem->pgmap.ref = &dax_pmem->ref; 114 dax_pmem->pgmap.ref = &dax_pmem->ref;
114 addr = devm_memremap_pages(dev, &dax_pmem->pgmap); 115 addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
115 if (IS_ERR(addr)) 116 if (IS_ERR(addr)) {
117 devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
118 percpu_ref_exit(&dax_pmem->ref);
116 return PTR_ERR(addr); 119 return PTR_ERR(addr);
120 }
117 121
118 rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, 122 rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
119 &dax_pmem->ref); 123 &dax_pmem->ref);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 45276abf03aa..6e928f37d084 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -89,7 +89,6 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
89 struct request_queue *q; 89 struct request_queue *q;
90 pgoff_t pgoff; 90 pgoff_t pgoff;
91 int err, id; 91 int err, id;
92 void *kaddr;
93 pfn_t pfn; 92 pfn_t pfn;
94 long len; 93 long len;
95 char buf[BDEVNAME_SIZE]; 94 char buf[BDEVNAME_SIZE];
@@ -122,7 +121,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
122 } 121 }
123 122
124 id = dax_read_lock(); 123 id = dax_read_lock();
125 len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); 124 len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
126 dax_read_unlock(id); 125 dax_read_unlock(id);
127 126
128 put_dax(dax_dev); 127 put_dax(dax_dev);
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 3a28a68f184c..5f1f80d424dd 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -268,9 +268,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
268 i = 0; 268 i = 0;
269 do { 269 do {
270 long daa; 270 long daa;
271 void *dummy_addr;
272 daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, 271 daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
273 &dummy_addr, &pfn); 272 NULL, &pfn);
274 if (daa <= 0) { 273 if (daa <= 0) {
275 r = daa ? daa : -EINVAL; 274 r = daa ? daa : -EINVAL;
276 goto err3; 275 goto err3;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 27902a8799b1..8aae6dcc839f 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -812,9 +812,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
812 * overshoots the remainder by 4 bytes, assume it was 812 * overshoots the remainder by 4 bytes, assume it was
813 * including 'status'. 813 * including 'status'.
814 */ 814 */
815 if (out_field[1] - 8 == remainder) 815 if (out_field[1] - 4 == remainder)
816 return remainder; 816 return remainder;
817 return out_field[1] - 4; 817 return out_field[1] - 8;
818 } else if (cmd == ND_CMD_CALL) { 818 } else if (cmd == ND_CMD_CALL) {
819 struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field; 819 struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
820 820
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 233907889f96..6c8fb7590838 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -34,6 +34,9 @@ static int nvdimm_probe(struct device *dev)
34 return rc; 34 return rc;
35 } 35 }
36 36
37 /* reset locked, to be validated below... */
38 nvdimm_clear_locked(dev);
39
37 ndd = kzalloc(sizeof(*ndd), GFP_KERNEL); 40 ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
38 if (!ndd) 41 if (!ndd)
39 return -ENOMEM; 42 return -ENOMEM;
@@ -48,12 +51,30 @@ static int nvdimm_probe(struct device *dev)
48 get_device(dev); 51 get_device(dev);
49 kref_init(&ndd->kref); 52 kref_init(&ndd->kref);
50 53
54 /*
55 * EACCES failures reading the namespace label-area-properties
56 * are interpreted as the DIMM capacity being locked but the
57 * namespace labels themselves being accessible.
58 */
51 rc = nvdimm_init_nsarea(ndd); 59 rc = nvdimm_init_nsarea(ndd);
52 if (rc == -EACCES) 60 if (rc == -EACCES) {
61 /*
62 * See nvdimm_namespace_common_probe() where we fail to
63 * allow namespaces to probe while the DIMM is locked,
64 * but we do allow for namespace enumeration.
65 */
53 nvdimm_set_locked(dev); 66 nvdimm_set_locked(dev);
67 rc = 0;
68 }
54 if (rc) 69 if (rc)
55 goto err; 70 goto err;
56 71
72 /*
73 * EACCES failures reading the namespace label-data are
74 * interpreted as the label area being locked in addition to the
75 * DIMM capacity. We fail the dimm probe to prevent regions from
76 * attempting to parse the label area.
77 */
57 rc = nvdimm_init_config_data(ndd); 78 rc = nvdimm_init_config_data(ndd);
58 if (rc == -EACCES) 79 if (rc == -EACCES)
59 nvdimm_set_locked(dev); 80 nvdimm_set_locked(dev);
@@ -72,7 +93,6 @@ static int nvdimm_probe(struct device *dev)
72 if (rc == 0) 93 if (rc == 0)
73 nvdimm_set_aliasing(dev); 94 nvdimm_set_aliasing(dev);
74 } 95 }
75 nvdimm_clear_locked(dev);
76 nvdimm_bus_unlock(dev); 96 nvdimm_bus_unlock(dev);
77 97
78 if (rc) 98 if (rc)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 8d348b22ba45..863cabc35215 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -537,6 +537,37 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
537} 537}
538 538
539/** 539/**
540 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
541 * contiguous unallocated dpa range.
542 * @nd_region: constrain available space check to this reference region
543 * @nd_mapping: container of dpa-resource-root + labels
544 */
545resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
546 struct nd_mapping *nd_mapping)
547{
548 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
549 struct nvdimm_bus *nvdimm_bus;
550 resource_size_t max = 0;
551 struct resource *res;
552
553 /* if a dimm is disabled the available capacity is zero */
554 if (!ndd)
555 return 0;
556
557 nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
558 if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
559 return 0;
560 for_each_dpa_resource(ndd, res) {
561 if (strcmp(res->name, "pmem-reserve") != 0)
562 continue;
563 if (resource_size(res) > max)
564 max = resource_size(res);
565 }
566 release_free_pmem(nvdimm_bus, nd_mapping);
567 return max;
568}
569
570/**
540 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa 571 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
541 * @nd_mapping: container of dpa-resource-root + labels 572 * @nd_mapping: container of dpa-resource-root + labels
542 * @nd_region: constrain available space check to this reference region 573 * @nd_region: constrain available space check to this reference region
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 28afdd668905..4a4266250c28 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -799,7 +799,7 @@ static int merge_dpa(struct nd_region *nd_region,
799 return 0; 799 return 0;
800} 800}
801 801
802static int __reserve_free_pmem(struct device *dev, void *data) 802int __reserve_free_pmem(struct device *dev, void *data)
803{ 803{
804 struct nvdimm *nvdimm = data; 804 struct nvdimm *nvdimm = data;
805 struct nd_region *nd_region; 805 struct nd_region *nd_region;
@@ -836,7 +836,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
836 return 0; 836 return 0;
837} 837}
838 838
839static void release_free_pmem(struct nvdimm_bus *nvdimm_bus, 839void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
840 struct nd_mapping *nd_mapping) 840 struct nd_mapping *nd_mapping)
841{ 841{
842 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 842 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1032,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1032 1032
1033 allocated += nvdimm_allocated_dpa(ndd, &label_id); 1033 allocated += nvdimm_allocated_dpa(ndd, &label_id);
1034 } 1034 }
1035 available = nd_region_available_dpa(nd_region); 1035 available = nd_region_allocatable_dpa(nd_region);
1036 1036
1037 if (val > available + allocated) 1037 if (val > available + allocated)
1038 return -ENOSPC; 1038 return -ENOSPC;
@@ -1144,6 +1144,26 @@ resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
1144} 1144}
1145EXPORT_SYMBOL(nvdimm_namespace_capacity); 1145EXPORT_SYMBOL(nvdimm_namespace_capacity);
1146 1146
1147bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
1148{
1149 int i;
1150 bool locked = false;
1151 struct device *dev = &ndns->dev;
1152 struct nd_region *nd_region = to_nd_region(dev->parent);
1153
1154 for (i = 0; i < nd_region->ndr_mappings; i++) {
1155 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1156 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1157
1158 if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
1159 dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
1160 locked = true;
1161 }
1162 }
1163 return locked;
1164}
1165EXPORT_SYMBOL(nvdimm_namespace_locked);
1166
1147static ssize_t size_show(struct device *dev, 1167static ssize_t size_show(struct device *dev,
1148 struct device_attribute *attr, char *buf) 1168 struct device_attribute *attr, char *buf)
1149{ 1169{
@@ -1695,6 +1715,9 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1695 } 1715 }
1696 } 1716 }
1697 1717
1718 if (nvdimm_namespace_locked(ndns))
1719 return ERR_PTR(-EACCES);
1720
1698 size = nvdimm_namespace_capacity(ndns); 1721 size = nvdimm_namespace_capacity(ndns);
1699 if (size < ND_MIN_NAMESPACE_SIZE) { 1722 if (size < ND_MIN_NAMESPACE_SIZE) {
1700 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n", 1723 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 79274ead54fb..ac68072fb8cd 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -100,6 +100,14 @@ struct nd_region;
100struct nvdimm_drvdata; 100struct nvdimm_drvdata;
101struct nd_mapping; 101struct nd_mapping;
102void nd_mapping_free_labels(struct nd_mapping *nd_mapping); 102void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
103
104int __reserve_free_pmem(struct device *dev, void *data);
105void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
106 struct nd_mapping *nd_mapping);
107
108resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
109 struct nd_mapping *nd_mapping);
110resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
103resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, 111resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
104 struct nd_mapping *nd_mapping, resource_size_t *overlap); 112 struct nd_mapping *nd_mapping, resource_size_t *overlap);
105resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); 113resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 6ee7fd7e4bbd..98317e7ce5b5 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -357,6 +357,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
357 struct nd_label_id *label_id, resource_size_t start, 357 struct nd_label_id *label_id, resource_size_t start,
358 resource_size_t n); 358 resource_size_t n);
359resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns); 359resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
360bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
360struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev); 361struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
361int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); 362int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
362int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt); 363int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index dd17acd8fe68..c23649867696 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -226,8 +226,11 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
226 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512, 226 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
227 PFN_PHYS(nr_pages)))) 227 PFN_PHYS(nr_pages))))
228 return -EIO; 228 return -EIO;
229 *kaddr = pmem->virt_addr + offset; 229
230 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); 230 if (kaddr)
231 *kaddr = pmem->virt_addr + offset;
232 if (pfn)
233 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
231 234
232 /* 235 /*
233 * If badblocks are present, limit known good range to the 236 * If badblocks are present, limit known good range to the
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ec3543b83330..fa37afcd43ff 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -389,6 +389,30 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
389 return available; 389 return available;
390} 390}
391 391
392resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
393{
394 resource_size_t available = 0;
395 int i;
396
397 if (is_memory(&nd_region->dev))
398 available = PHYS_ADDR_MAX;
399
400 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
401 for (i = 0; i < nd_region->ndr_mappings; i++) {
402 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
403
404 if (is_memory(&nd_region->dev))
405 available = min(available,
406 nd_pmem_max_contiguous_dpa(nd_region,
407 nd_mapping));
408 else if (is_nd_blk(&nd_region->dev))
409 available += nd_blk_available_dpa(nd_region);
410 }
411 if (is_memory(&nd_region->dev))
412 return available * nd_region->ndr_mappings;
413 return available;
414}
415
392static ssize_t available_size_show(struct device *dev, 416static ssize_t available_size_show(struct device *dev,
393 struct device_attribute *attr, char *buf) 417 struct device_attribute *attr, char *buf)
394{ 418{
@@ -410,6 +434,21 @@ static ssize_t available_size_show(struct device *dev,
410} 434}
411static DEVICE_ATTR_RO(available_size); 435static DEVICE_ATTR_RO(available_size);
412 436
437static ssize_t max_available_extent_show(struct device *dev,
438 struct device_attribute *attr, char *buf)
439{
440 struct nd_region *nd_region = to_nd_region(dev);
441 unsigned long long available = 0;
442
443 nvdimm_bus_lock(dev);
444 wait_nvdimm_bus_probe_idle(dev);
445 available = nd_region_allocatable_dpa(nd_region);
446 nvdimm_bus_unlock(dev);
447
448 return sprintf(buf, "%llu\n", available);
449}
450static DEVICE_ATTR_RO(max_available_extent);
451
413static ssize_t init_namespaces_show(struct device *dev, 452static ssize_t init_namespaces_show(struct device *dev,
414 struct device_attribute *attr, char *buf) 453 struct device_attribute *attr, char *buf)
415{ 454{
@@ -561,6 +600,7 @@ static struct attribute *nd_region_attributes[] = {
561 &dev_attr_read_only.attr, 600 &dev_attr_read_only.attr,
562 &dev_attr_set_cookie.attr, 601 &dev_attr_set_cookie.attr,
563 &dev_attr_available_size.attr, 602 &dev_attr_available_size.attr,
603 &dev_attr_max_available_extent.attr,
564 &dev_attr_namespace_seed.attr, 604 &dev_attr_namespace_seed.attr,
565 &dev_attr_init_namespaces.attr, 605 &dev_attr_init_namespaces.attr,
566 &dev_attr_badblocks.attr, 606 &dev_attr_badblocks.attr,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ed607288e696..23e526cda5c1 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -922,9 +922,11 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
922 unsigned long dev_sz; 922 unsigned long dev_sz;
923 923
924 dev_sz = dev_info->end - dev_info->start + 1; 924 dev_sz = dev_info->end - dev_info->start + 1;
925 *kaddr = (void *) dev_info->start + offset; 925 if (kaddr)
926 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 926 *kaddr = (void *) dev_info->start + offset;
927 PFN_DEV|PFN_SPECIAL); 927 if (pfn)
928 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
929 PFN_DEV|PFN_SPECIAL);
928 930
929 return (dev_sz - offset) / PAGE_SIZE; 931 return (dev_sz - offset) / PAGE_SIZE;
930} 932}