aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/acpi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-03-16 20:45:56 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-16 20:45:56 -0400
commit8759957b77ac1b5b5bdfac5ba049789107e85190 (patch)
tree82d520563ea686259f29fab26abec293b5ee6a5f /drivers/acpi
parent6968e6f8329d014920331dd2cf166ccd474b5299 (diff)
parent489011652a2d5555901def04c24d68874e8ba9a1 (diff)
Merge tag 'libnvdimm-for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams: - Asynchronous address range scrub: Given the capacities of next generation persistent memory devices a scrub operation to find all poison may take 10s of seconds. We want this scrub work to be done asynchronously with the rest of system initialization, so we move it out of line from the NFIT probing, i.e. acpi_nfit_add(). - Clear poison: ACPI 6.1 introduces the ability to send "clear error" commands to the ACPI0012:00 device representing the root of an "nvdimm bus". Similar to relocating a bad block on a disk, this support clears media errors in response to a write. - Persistent memory resource tracking: A persistent memory range may be designated as simply "reserved" by platform firmware in the efi/e820 memory map. Later when the NFIT driver loads it discovers that the range is "Persistent Memory". The NFIT bus driver inserts a resource to advertise that "persistent" attribute in the system resource tree for /proc/iomem and kernel-internal usages. - Miscellaneous cleanups and fixes: Workaround section misaligned pmem ranges when allocating a struct page memmap, fix handling of the read-only case in the ioctl path, and clean up block device major number allocation. 
* tag 'libnvdimm-for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (26 commits) libnvdimm, pmem: clear poison on write libnvdimm, pmem: fix kmap_atomic() leak in error path nvdimm/btt: don't allocate unused major device number nvdimm/blk: don't allocate unused major device number pmem: don't allocate unused major device number ACPI: Change NFIT driver to insert new resource resource: Export insert_resource and remove_resource resource: Add remove_resource interface resource: Change __request_region to inherit from immediate parent libnvdimm, pmem: fix ia64 build, use PHYS_PFN nfit, libnvdimm: clear poison command support libnvdimm, pfn: 'resource'-address and 'size' attributes for pfn devices libnvdimm, pmem: adjust for section collisions with 'System RAM' libnvdimm, pmem: fix 'pfn' support for section-misaligned namespaces libnvdimm: Fix security issue with DSM IOCTL. libnvdimm: Clean-up access mode check. tools/testing/nvdimm: expand ars unit testing nfit: disable userspace initiated ars during scrub nfit: scrub and register regions in a workqueue nfit, libnvdimm: async region scrub workqueue ...
Diffstat (limited to 'drivers/acpi')
-rw-r--r--drivers/acpi/nfit.c798
-rw-r--r--drivers/acpi/nfit.h30
2 files changed, 611 insertions, 217 deletions
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 35947ac87644..d0f35e63640b 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -21,6 +21,7 @@
21#include <linux/sort.h> 21#include <linux/sort.h>
22#include <linux/pmem.h> 22#include <linux/pmem.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/nd.h>
24#include <asm/cacheflush.h> 25#include <asm/cacheflush.h>
25#include "nfit.h" 26#include "nfit.h"
26 27
@@ -34,6 +35,18 @@ static bool force_enable_dimms;
34module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 35module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
35MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 36MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
36 37
38static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
39module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
40MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
41
42/* after three payloads of overflow, it's dead jim */
43static unsigned int scrub_overflow_abort = 3;
44module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(scrub_overflow_abort,
46 "Number of times we overflow ARS results before abort");
47
48static struct workqueue_struct *nfit_wq;
49
37struct nfit_table_prev { 50struct nfit_table_prev {
38 struct list_head spas; 51 struct list_head spas;
39 struct list_head memdevs; 52 struct list_head memdevs;
@@ -72,9 +85,90 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
72 return to_acpi_device(acpi_desc->dev); 85 return to_acpi_device(acpi_desc->dev);
73} 86}
74 87
88static int xlat_status(void *buf, unsigned int cmd)
89{
90 struct nd_cmd_clear_error *clear_err;
91 struct nd_cmd_ars_status *ars_status;
92 struct nd_cmd_ars_start *ars_start;
93 struct nd_cmd_ars_cap *ars_cap;
94 u16 flags;
95
96 switch (cmd) {
97 case ND_CMD_ARS_CAP:
98 ars_cap = buf;
99 if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
100 return -ENOTTY;
101
102 /* Command failed */
103 if (ars_cap->status & 0xffff)
104 return -EIO;
105
106 /* No supported scan types for this range */
107 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
108 if ((ars_cap->status >> 16 & flags) == 0)
109 return -ENOTTY;
110 break;
111 case ND_CMD_ARS_START:
112 ars_start = buf;
113 /* ARS is in progress */
114 if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
115 return -EBUSY;
116
117 /* Command failed */
118 if (ars_start->status & 0xffff)
119 return -EIO;
120 break;
121 case ND_CMD_ARS_STATUS:
122 ars_status = buf;
123 /* Command failed */
124 if (ars_status->status & 0xffff)
125 return -EIO;
126 /* Check extended status (Upper two bytes) */
127 if (ars_status->status == NFIT_ARS_STATUS_DONE)
128 return 0;
129
130 /* ARS is in progress */
131 if (ars_status->status == NFIT_ARS_STATUS_BUSY)
132 return -EBUSY;
133
134 /* No ARS performed for the current boot */
135 if (ars_status->status == NFIT_ARS_STATUS_NONE)
136 return -EAGAIN;
137
138 /*
139 * ARS interrupted, either we overflowed or some other
140 * agent wants the scan to stop. If we didn't overflow
141 * then just continue with the returned results.
142 */
143 if (ars_status->status == NFIT_ARS_STATUS_INTR) {
144 if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
145 return -ENOSPC;
146 return 0;
147 }
148
149 /* Unknown status */
150 if (ars_status->status >> 16)
151 return -EIO;
152 break;
153 case ND_CMD_CLEAR_ERROR:
154 clear_err = buf;
155 if (clear_err->status & 0xffff)
156 return -EIO;
157 if (!clear_err->cleared)
158 return -EIO;
159 if (clear_err->length > clear_err->cleared)
160 return clear_err->cleared;
161 break;
162 default:
163 break;
164 }
165
166 return 0;
167}
168
75static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, 169static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
76 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 170 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
77 unsigned int buf_len) 171 unsigned int buf_len, int *cmd_rc)
78{ 172{
79 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 173 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
80 const struct nd_cmd_desc *desc = NULL; 174 const struct nd_cmd_desc *desc = NULL;
@@ -185,6 +279,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
185 * unfilled in the output buffer 279 * unfilled in the output buffer
186 */ 280 */
187 rc = buf_len - offset - in_buf.buffer.length; 281 rc = buf_len - offset - in_buf.buffer.length;
282 if (cmd_rc)
283 *cmd_rc = xlat_status(buf, cmd);
188 } else { 284 } else {
189 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 285 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
190 __func__, dimm_name, cmd_name, buf_len, 286 __func__, dimm_name, cmd_name, buf_len,
@@ -675,12 +771,11 @@ static struct attribute_group acpi_nfit_attribute_group = {
675 .attrs = acpi_nfit_attributes, 771 .attrs = acpi_nfit_attributes,
676}; 772};
677 773
678const struct attribute_group *acpi_nfit_attribute_groups[] = { 774static const struct attribute_group *acpi_nfit_attribute_groups[] = {
679 &nvdimm_bus_attribute_group, 775 &nvdimm_bus_attribute_group,
680 &acpi_nfit_attribute_group, 776 &acpi_nfit_attribute_group,
681 NULL, 777 NULL,
682}; 778};
683EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);
684 779
685static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 780static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
686{ 781{
@@ -917,7 +1012,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
917 if (!adev) 1012 if (!adev)
918 return; 1013 return;
919 1014
920 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++) 1015 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
921 if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) 1016 if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
922 set_bit(i, &nd_desc->dsm_mask); 1017 set_bit(i, &nd_desc->dsm_mask);
923} 1018}
@@ -1105,7 +1200,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1105 writeq(cmd, mmio->addr.base + offset); 1200 writeq(cmd, mmio->addr.base + offset);
1106 wmb_blk(nfit_blk); 1201 wmb_blk(nfit_blk);
1107 1202
1108 if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH) 1203 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
1109 readq(mmio->addr.base + offset); 1204 readq(mmio->addr.base + offset);
1110} 1205}
1111 1206
@@ -1141,7 +1236,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1141 memcpy_to_pmem(mmio->addr.aperture + offset, 1236 memcpy_to_pmem(mmio->addr.aperture + offset,
1142 iobuf + copied, c); 1237 iobuf + copied, c);
1143 else { 1238 else {
1144 if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH) 1239 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
1145 mmio_flush_range((void __force *) 1240 mmio_flush_range((void __force *)
1146 mmio->addr.aperture + offset, c); 1241 mmio->addr.aperture + offset, c);
1147 1242
@@ -1328,13 +1423,13 @@ static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1328 1423
1329 memset(&flags, 0, sizeof(flags)); 1424 memset(&flags, 0, sizeof(flags));
1330 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 1425 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
1331 sizeof(flags)); 1426 sizeof(flags), NULL);
1332 1427
1333 if (rc >= 0 && flags.status == 0) 1428 if (rc >= 0 && flags.status == 0)
1334 nfit_blk->dimm_flags = flags.flags; 1429 nfit_blk->dimm_flags = flags.flags;
1335 else if (rc == -ENOTTY) { 1430 else if (rc == -ENOTTY) {
1336 /* fall back to a conservative default */ 1431 /* fall back to a conservative default */
1337 nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH; 1432 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
1338 rc = 0; 1433 rc = 0;
1339 } else 1434 } else
1340 rc = -ENXIO; 1435 rc = -ENXIO;
@@ -1473,93 +1568,85 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
1473 /* devm will free nfit_blk */ 1568 /* devm will free nfit_blk */
1474} 1569}
1475 1570
1476static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc, 1571static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1477 struct nd_cmd_ars_cap *cmd, u64 addr, u64 length) 1572 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
1478{ 1573{
1479 cmd->address = addr; 1574 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1480 cmd->length = length; 1575 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1576 int cmd_rc, rc;
1481 1577
1482 return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, 1578 cmd->address = spa->address;
1483 sizeof(*cmd)); 1579 cmd->length = spa->length;
1580 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
1581 sizeof(*cmd), &cmd_rc);
1582 if (rc < 0)
1583 return rc;
1584 return cmd_rc;
1484} 1585}
1485 1586
1486static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc, 1587static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
1487 struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
1488{ 1588{
1489 int rc; 1589 int rc;
1590 int cmd_rc;
1591 struct nd_cmd_ars_start ars_start;
1592 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1593 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1490 1594
1491 cmd->address = addr; 1595 memset(&ars_start, 0, sizeof(ars_start));
1492 cmd->length = length; 1596 ars_start.address = spa->address;
1493 cmd->type = ND_ARS_PERSISTENT; 1597 ars_start.length = spa->length;
1598 if (nfit_spa_type(spa) == NFIT_SPA_PM)
1599 ars_start.type = ND_ARS_PERSISTENT;
1600 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
1601 ars_start.type = ND_ARS_VOLATILE;
1602 else
1603 return -ENOTTY;
1494 1604
1495 while (1) { 1605 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1496 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd, 1606 sizeof(ars_start), &cmd_rc);
1497 sizeof(*cmd)); 1607
1498 if (rc) 1608 if (rc < 0)
1499 return rc; 1609 return rc;
1500 switch (cmd->status) { 1610 return cmd_rc;
1501 case 0:
1502 return 0;
1503 case 1:
1504 /* ARS unsupported, but we should never get here */
1505 return 0;
1506 case 6:
1507 /* ARS is in progress */
1508 msleep(1000);
1509 break;
1510 default:
1511 return -ENXIO;
1512 }
1513 }
1514} 1611}
1515 1612
1516static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc, 1613static int ars_continue(struct acpi_nfit_desc *acpi_desc)
1517 struct nd_cmd_ars_status *cmd, u32 size)
1518{ 1614{
1519 int rc; 1615 int rc, cmd_rc;
1616 struct nd_cmd_ars_start ars_start;
1617 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1618 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1619
1620 memset(&ars_start, 0, sizeof(ars_start));
1621 ars_start.address = ars_status->restart_address;
1622 ars_start.length = ars_status->restart_length;
1623 ars_start.type = ars_status->type;
1624 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1625 sizeof(ars_start), &cmd_rc);
1626 if (rc < 0)
1627 return rc;
1628 return cmd_rc;
1629}
1520 1630
1521 while (1) { 1631static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
1522 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd, 1632{
1523 size); 1633 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1524 if (rc || cmd->status & 0xffff) 1634 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1525 return -ENXIO; 1635 int rc, cmd_rc;
1526 1636
1527 /* Check extended status (Upper two bytes) */ 1637 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
1528 switch (cmd->status >> 16) { 1638 acpi_desc->ars_status_size, &cmd_rc);
1529 case 0: 1639 if (rc < 0)
1530 return 0; 1640 return rc;
1531 case 1: 1641 return cmd_rc;
1532 /* ARS is in progress */
1533 msleep(1000);
1534 break;
1535 case 2:
1536 /* No ARS performed for the current boot */
1537 return 0;
1538 case 3:
1539 /* TODO: error list overflow support */
1540 default:
1541 return -ENXIO;
1542 }
1543 }
1544} 1642}
1545 1643
1546static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus, 1644static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
1547 struct nd_cmd_ars_status *ars_status, u64 start) 1645 struct nd_cmd_ars_status *ars_status)
1548{ 1646{
1549 int rc; 1647 int rc;
1550 u32 i; 1648 u32 i;
1551 1649
1552 /*
1553 * The address field returned by ars_status should be either
1554 * less than or equal to the address we last started ARS for.
1555 * The (start, length) returned by ars_status should also have
1556 * non-zero overlap with the range we started ARS for.
1557 * If this is not the case, bail.
1558 */
1559 if (ars_status->address > start ||
1560 (ars_status->address + ars_status->length < start))
1561 return -ENXIO;
1562
1563 for (i = 0; i < ars_status->num_records; i++) { 1650 for (i = 0; i < ars_status->num_records; i++) {
1564 rc = nvdimm_bus_add_poison(nvdimm_bus, 1651 rc = nvdimm_bus_add_poison(nvdimm_bus,
1565 ars_status->records[i].err_address, 1652 ars_status->records[i].err_address,
@@ -1571,118 +1658,56 @@ static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
1571 return 0; 1658 return 0;
1572} 1659}
1573 1660
1574static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc, 1661static void acpi_nfit_remove_resource(void *data)
1575 struct nd_region_desc *ndr_desc)
1576{ 1662{
1577 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 1663 struct resource *res = data;
1578 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
1579 struct nd_cmd_ars_status *ars_status = NULL;
1580 struct nd_cmd_ars_start *ars_start = NULL;
1581 struct nd_cmd_ars_cap *ars_cap = NULL;
1582 u64 start, len, cur, remaining;
1583 u32 ars_status_size;
1584 int rc;
1585
1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
1587 if (!ars_cap)
1588 return -ENOMEM;
1589 1664
1590 start = ndr_desc->res->start; 1665 remove_resource(res);
1591 len = ndr_desc->res->end - ndr_desc->res->start + 1; 1666}
1592
1593 /*
1594 * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
1595 * Scrub' flag in extended status is not set, skip this but continue
1596 * initialization
1597 */
1598 rc = ars_get_cap(nd_desc, ars_cap, start, len);
1599 if (rc == -ENOTTY) {
1600 dev_dbg(acpi_desc->dev,
1601 "Address Range Scrub is not implemented, won't create an error list\n");
1602 rc = 0;
1603 goto out;
1604 }
1605 if (rc)
1606 goto out;
1607
1608 if ((ars_cap->status & 0xffff) ||
1609 !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
1610 dev_warn(acpi_desc->dev,
1611 "ARS unsupported (status: 0x%x), won't create an error list\n",
1612 ars_cap->status);
1613 goto out;
1614 }
1615
1616 /*
1617 * Check if a full-range ARS has been run. If so, use those results
1618 * without having to start a new ARS.
1619 */
1620 ars_status_size = ars_cap->max_ars_out;
1621 ars_status = kzalloc(ars_status_size, GFP_KERNEL);
1622 if (!ars_status) {
1623 rc = -ENOMEM;
1624 goto out;
1625 }
1626 1667
1627 rc = ars_get_status(nd_desc, ars_status, ars_status_size); 1668static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
1628 if (rc) 1669 struct nd_region_desc *ndr_desc)
1629 goto out; 1670{
1671 struct resource *res, *nd_res = ndr_desc->res;
1672 int is_pmem, ret;
1630 1673
1631 if (ars_status->address <= start && 1674 /* No operation if the region is already registered as PMEM */
1632 (ars_status->address + ars_status->length >= start + len)) { 1675 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
1633 rc = ars_status_process_records(nvdimm_bus, ars_status, start); 1676 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
1634 goto out; 1677 if (is_pmem == REGION_INTERSECTS)
1635 } 1678 return 0;
1636 1679
1637 /* 1680 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
1638 * ARS_STATUS can overflow if the number of poison entries found is 1681 if (!res)
1639 * greater than the maximum buffer size (ars_cap->max_ars_out)
1640 * To detect overflow, check if the length field of ars_status
1641 * is less than the length we supplied. If so, process the
1642 * error entries we got, adjust the start point, and start again
1643 */
1644 ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
1645 if (!ars_start)
1646 return -ENOMEM; 1682 return -ENOMEM;
1647 1683
1648 cur = start; 1684 res->name = "Persistent Memory";
1649 remaining = len; 1685 res->start = nd_res->start;
1650 do { 1686 res->end = nd_res->end;
1651 u64 done, end; 1687 res->flags = IORESOURCE_MEM;
1652 1688 res->desc = IORES_DESC_PERSISTENT_MEMORY;
1653 rc = ars_do_start(nd_desc, ars_start, cur, remaining);
1654 if (rc)
1655 goto out;
1656
1657 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1658 if (rc)
1659 goto out;
1660 1689
1661 rc = ars_status_process_records(nvdimm_bus, ars_status, cur); 1690 ret = insert_resource(&iomem_resource, res);
1662 if (rc) 1691 if (ret)
1663 goto out; 1692 return ret;
1664 1693
1665 end = min(cur + remaining, 1694 ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
1666 ars_status->address + ars_status->length); 1695 if (ret) {
1667 done = end - cur; 1696 remove_resource(res);
1668 cur += done; 1697 return ret;
1669 remaining -= done; 1698 }
1670 } while (remaining);
1671 1699
1672 out: 1700 return 0;
1673 kfree(ars_cap);
1674 kfree(ars_start);
1675 kfree(ars_status);
1676 return rc;
1677} 1701}
1678 1702
1679static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 1703static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1680 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, 1704 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
1681 struct acpi_nfit_memory_map *memdev, 1705 struct acpi_nfit_memory_map *memdev,
1682 struct acpi_nfit_system_address *spa) 1706 struct nfit_spa *nfit_spa)
1683{ 1707{
1684 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 1708 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
1685 memdev->device_handle); 1709 memdev->device_handle);
1710 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1686 struct nd_blk_region_desc *ndbr_desc; 1711 struct nd_blk_region_desc *ndbr_desc;
1687 struct nfit_mem *nfit_mem; 1712 struct nfit_mem *nfit_mem;
1688 int blk_valid = 0; 1713 int blk_valid = 0;
@@ -1718,7 +1743,9 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1718 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1743 ndbr_desc->enable = acpi_nfit_blk_region_enable;
1719 ndbr_desc->disable = acpi_nfit_blk_region_disable; 1744 ndbr_desc->disable = acpi_nfit_blk_region_disable;
1720 ndbr_desc->do_io = acpi_desc->blk_do_io; 1745 ndbr_desc->do_io = acpi_desc->blk_do_io;
1721 if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc)) 1746 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
1747 ndr_desc);
1748 if (!nfit_spa->nd_region)
1722 return -ENOMEM; 1749 return -ENOMEM;
1723 break; 1750 break;
1724 } 1751 }
@@ -1738,7 +1765,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
1738 struct resource res; 1765 struct resource res;
1739 int count = 0, rc; 1766 int count = 0, rc;
1740 1767
1741 if (nfit_spa->is_registered) 1768 if (nfit_spa->nd_region)
1742 return 0; 1769 return 0;
1743 1770
1744 if (spa->range_index == 0) { 1771 if (spa->range_index == 0) {
@@ -1775,47 +1802,332 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
1775 } 1802 }
1776 nd_mapping = &nd_mappings[count++]; 1803 nd_mapping = &nd_mappings[count++];
1777 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, 1804 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
1778 memdev, spa); 1805 memdev, nfit_spa);
1779 if (rc) 1806 if (rc)
1780 return rc; 1807 goto out;
1781 } 1808 }
1782 1809
1783 ndr_desc->nd_mapping = nd_mappings; 1810 ndr_desc->nd_mapping = nd_mappings;
1784 ndr_desc->num_mappings = count; 1811 ndr_desc->num_mappings = count;
1785 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 1812 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
1786 if (rc) 1813 if (rc)
1787 return rc; 1814 goto out;
1788 1815
1789 nvdimm_bus = acpi_desc->nvdimm_bus; 1816 nvdimm_bus = acpi_desc->nvdimm_bus;
1790 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 1817 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
1791 rc = acpi_nfit_find_poison(acpi_desc, ndr_desc); 1818 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
1792 if (rc) { 1819 if (rc) {
1793 dev_err(acpi_desc->dev, 1820 dev_warn(acpi_desc->dev,
1794 "error while performing ARS to find poison: %d\n", 1821 "failed to insert pmem resource to iomem: %d\n",
1795 rc); 1822 rc);
1796 return rc; 1823 goto out;
1797 } 1824 }
1798 if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc)) 1825
1799 return -ENOMEM; 1826 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
1827 ndr_desc);
1828 if (!nfit_spa->nd_region)
1829 rc = -ENOMEM;
1800 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { 1830 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
1801 if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc)) 1831 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
1802 return -ENOMEM; 1832 ndr_desc);
1833 if (!nfit_spa->nd_region)
1834 rc = -ENOMEM;
1803 } 1835 }
1804 1836
1805 nfit_spa->is_registered = 1; 1837 out:
1838 if (rc)
1839 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
1840 nfit_spa->spa->range_index);
1841 return rc;
1842}
1843
1844static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
1845 u32 max_ars)
1846{
1847 struct device *dev = acpi_desc->dev;
1848 struct nd_cmd_ars_status *ars_status;
1849
1850 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
1851 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
1852 return 0;
1853 }
1854
1855 if (acpi_desc->ars_status)
1856 devm_kfree(dev, acpi_desc->ars_status);
1857 acpi_desc->ars_status = NULL;
1858 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
1859 if (!ars_status)
1860 return -ENOMEM;
1861 acpi_desc->ars_status = ars_status;
1862 acpi_desc->ars_status_size = max_ars;
1806 return 0; 1863 return 0;
1807} 1864}
1808 1865
1809static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 1866static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
1867 struct nfit_spa *nfit_spa)
1810{ 1868{
1869 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1870 int rc;
1871
1872 if (!nfit_spa->max_ars) {
1873 struct nd_cmd_ars_cap ars_cap;
1874
1875 memset(&ars_cap, 0, sizeof(ars_cap));
1876 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
1877 if (rc < 0)
1878 return rc;
1879 nfit_spa->max_ars = ars_cap.max_ars_out;
1880 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
1881 /* check that the supported scrub types match the spa type */
1882 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
1883 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
1884 return -ENOTTY;
1885 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
1886 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
1887 return -ENOTTY;
1888 }
1889
1890 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
1891 return -ENOMEM;
1892
1893 rc = ars_get_status(acpi_desc);
1894 if (rc < 0 && rc != -ENOSPC)
1895 return rc;
1896
1897 if (ars_status_process_records(acpi_desc->nvdimm_bus,
1898 acpi_desc->ars_status))
1899 return -ENOMEM;
1900
1901 return 0;
1902}
1903
1904static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
1905 struct nfit_spa *nfit_spa)
1906{
1907 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1908 unsigned int overflow_retry = scrub_overflow_abort;
1909 u64 init_ars_start = 0, init_ars_len = 0;
1910 struct device *dev = acpi_desc->dev;
1911 unsigned int tmo = scrub_timeout;
1912 int rc;
1913
1914 if (nfit_spa->ars_done || !nfit_spa->nd_region)
1915 return;
1916
1917 rc = ars_start(acpi_desc, nfit_spa);
1918 /*
1919 * If we timed out the initial scan we'll still be busy here,
1920 * and will wait another timeout before giving up permanently.
1921 */
1922 if (rc < 0 && rc != -EBUSY)
1923 return;
1924
1925 do {
1926 u64 ars_start, ars_len;
1927
1928 if (acpi_desc->cancel)
1929 break;
1930 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
1931 if (rc == -ENOTTY)
1932 break;
1933 if (rc == -EBUSY && !tmo) {
1934 dev_warn(dev, "range %d ars timeout, aborting\n",
1935 spa->range_index);
1936 break;
1937 }
1938
1939 if (rc == -EBUSY) {
1940 /*
1941 * Note, entries may be appended to the list
1942 * while the lock is dropped, but the workqueue
1943 * being active prevents entries being deleted /
1944 * freed.
1945 */
1946 mutex_unlock(&acpi_desc->init_mutex);
1947 ssleep(1);
1948 tmo--;
1949 mutex_lock(&acpi_desc->init_mutex);
1950 continue;
1951 }
1952
1953 /* we got some results, but there are more pending... */
1954 if (rc == -ENOSPC && overflow_retry--) {
1955 if (!init_ars_len) {
1956 init_ars_len = acpi_desc->ars_status->length;
1957 init_ars_start = acpi_desc->ars_status->address;
1958 }
1959 rc = ars_continue(acpi_desc);
1960 }
1961
1962 if (rc < 0) {
1963 dev_warn(dev, "range %d ars continuation failed\n",
1964 spa->range_index);
1965 break;
1966 }
1967
1968 if (init_ars_len) {
1969 ars_start = init_ars_start;
1970 ars_len = init_ars_len;
1971 } else {
1972 ars_start = acpi_desc->ars_status->address;
1973 ars_len = acpi_desc->ars_status->length;
1974 }
1975 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
1976 spa->range_index, ars_start, ars_len);
1977 /* notify the region about new poison entries */
1978 nvdimm_region_notify(nfit_spa->nd_region,
1979 NVDIMM_REVALIDATE_POISON);
1980 break;
1981 } while (1);
1982}
1983
1984static void acpi_nfit_scrub(struct work_struct *work)
1985{
1986 struct device *dev;
1987 u64 init_scrub_length = 0;
1811 struct nfit_spa *nfit_spa; 1988 struct nfit_spa *nfit_spa;
1989 u64 init_scrub_address = 0;
1990 bool init_ars_done = false;
1991 struct acpi_nfit_desc *acpi_desc;
1992 unsigned int tmo = scrub_timeout;
1993 unsigned int overflow_retry = scrub_overflow_abort;
1994
1995 acpi_desc = container_of(work, typeof(*acpi_desc), work);
1996 dev = acpi_desc->dev;
1812 1997
1998 /*
1999 * We scrub in 2 phases. The first phase waits for any platform
2000 * firmware initiated scrubs to complete and then we go search for the
2001 * affected spa regions to mark them scanned. In the second phase we
2002 * initiate a directed scrub for every range that was not scrubbed in
2003 * phase 1.
2004 */
2005
2006 /* process platform firmware initiated scrubs */
2007 retry:
2008 mutex_lock(&acpi_desc->init_mutex);
1813 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 2009 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1814 int rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 2010 struct nd_cmd_ars_status *ars_status;
2011 struct acpi_nfit_system_address *spa;
2012 u64 ars_start, ars_len;
2013 int rc;
1815 2014
1816 if (rc) 2015 if (acpi_desc->cancel)
1817 return rc; 2016 break;
2017
2018 if (nfit_spa->nd_region)
2019 continue;
2020
2021 if (init_ars_done) {
2022 /*
2023 * No need to re-query, we're now just
2024 * reconciling all the ranges covered by the
2025 * initial scrub
2026 */
2027 rc = 0;
2028 } else
2029 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2030
2031 if (rc == -ENOTTY) {
2032 /* no ars capability, just register spa and move on */
2033 acpi_nfit_register_region(acpi_desc, nfit_spa);
2034 continue;
2035 }
2036
2037 if (rc == -EBUSY && !tmo) {
2038 /* fallthrough to directed scrub in phase 2 */
2039 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2040 break;
2041 } else if (rc == -EBUSY) {
2042 mutex_unlock(&acpi_desc->init_mutex);
2043 ssleep(1);
2044 tmo--;
2045 goto retry;
2046 }
2047
2048 /* we got some results, but there are more pending... */
2049 if (rc == -ENOSPC && overflow_retry--) {
2050 ars_status = acpi_desc->ars_status;
2051 /*
2052 * Record the original scrub range, so that we
2053 * can recall all the ranges impacted by the
2054 * initial scrub.
2055 */
2056 if (!init_scrub_length) {
2057 init_scrub_length = ars_status->length;
2058 init_scrub_address = ars_status->address;
2059 }
2060 rc = ars_continue(acpi_desc);
2061 if (rc == 0) {
2062 mutex_unlock(&acpi_desc->init_mutex);
2063 goto retry;
2064 }
2065 }
2066
2067 if (rc < 0) {
2068 /*
2069 * Initial scrub failed, we'll give it one more
2070 * try below...
2071 */
2072 break;
2073 }
2074
2075 /* We got some final results, record completed ranges */
2076 ars_status = acpi_desc->ars_status;
2077 if (init_scrub_length) {
2078 ars_start = init_scrub_address;
2079 ars_len = ars_start + init_scrub_length;
2080 } else {
2081 ars_start = ars_status->address;
2082 ars_len = ars_status->length;
2083 }
2084 spa = nfit_spa->spa;
2085
2086 if (!init_ars_done) {
2087 init_ars_done = true;
2088 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2089 ars_start, ars_len);
2090 }
2091 if (ars_start <= spa->address && ars_start + ars_len
2092 >= spa->address + spa->length)
2093 acpi_nfit_register_region(acpi_desc, nfit_spa);
1818 } 2094 }
2095
2096 /*
2097 * For all the ranges not covered by an initial scrub we still
2098 * want to see if there are errors, but it's ok to discover them
2099 * asynchronously.
2100 */
2101 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2102 /*
2103 * Flag all the ranges that still need scrubbing, but
2104 * register them now to make data available.
2105 */
2106 if (nfit_spa->nd_region)
2107 nfit_spa->ars_done = 1;
2108 else
2109 acpi_nfit_register_region(acpi_desc, nfit_spa);
2110 }
2111
2112 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2113 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
2114 mutex_unlock(&acpi_desc->init_mutex);
2115}
2116
2117static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2118{
2119 struct nfit_spa *nfit_spa;
2120 int rc;
2121
2122 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2123 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2124 /* BLK regions don't need to wait for ars results */
2125 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2126 if (rc)
2127 return rc;
2128 }
2129
2130 queue_work(nfit_wq, &acpi_desc->work);
1819 return 0; 2131 return 0;
1820} 2132}
1821 2133
@@ -1901,15 +2213,64 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
1901} 2213}
1902EXPORT_SYMBOL_GPL(acpi_nfit_init); 2214EXPORT_SYMBOL_GPL(acpi_nfit_init);
1903 2215
1904static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev) 2216struct acpi_nfit_flush_work {
2217 struct work_struct work;
2218 struct completion cmp;
2219};
2220
2221static void flush_probe(struct work_struct *work)
1905{ 2222{
1906 struct nvdimm_bus_descriptor *nd_desc; 2223 struct acpi_nfit_flush_work *flush;
1907 struct acpi_nfit_desc *acpi_desc;
1908 struct device *dev = &adev->dev;
1909 2224
1910 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 2225 flush = container_of(work, typeof(*flush), work);
1911 if (!acpi_desc) 2226 complete(&flush->cmp);
1912 return ERR_PTR(-ENOMEM); 2227}
2228
/*
 * acpi_nfit_flush_probe - nvdimm_bus_descriptor ->flush_probe() hook.
 *
 * Guarantees that any in-flight NFIT probing/notification work and any
 * queued scrub work have been observed before the caller proceeds.
 * The device lock is bounced to serialize against acpi_nfit_add() /
 * acpi_nfit_notify(); then a sentinel work item is queued behind any
 * pending scrub work on the single-threaded nfit_wq and waited on.
 *
 * Returns 0 once the queue has drained past the sentinel, or
 * -ERESTARTSYS if the (potentially tens-of-seconds) wait is interrupted.
 */
 2229static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 2230{
 2231	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
 2232	struct device *dev = acpi_desc->dev;
 2233	struct acpi_nfit_flush_work flush;
 2234
 2235	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
 2236	device_lock(dev);
 2237	device_unlock(dev);
 2238
 2239	/*
 2240	 * Scrub work could take 10s of seconds, userspace may give up so we
 2241	 * need to be interruptible while waiting.
 2242	 */
 2243	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	/*
	 * NOTE(review): COMPLETION_INITIALIZER_ONSTACK() is an initializer
	 * expression, not a statement; this only initializes flush.cmp via
	 * the macro's statement-expression side effect. The idiomatic call
	 * here is init_completion(&flush.cmp) — confirm against the macro
	 * definition in the tree this is applied to (later kernels changed
	 * this exact line for that reason).
	 */
 2244	COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
 2245	queue_work(nfit_wq, &flush.work);
 2246	return wait_for_completion_interruptible(&flush.cmp);
 2247}
2248
/*
 * acpi_nfit_clear_to_send - nvdimm_bus_descriptor ->clear_to_send() hook.
 *
 * Gate userspace-initiated bus-level ND_CMD_ARS_START requests while the
 * kernel's own scrub work item is active, so a user start cannot interrupt
 * an in-progress kernel-initiated ARS. Per-dimm commands (@nvdimm set) and
 * all other bus commands are always allowed.
 *
 * Returns 0 when the command may be sent, -EBUSY while the scrub work is
 * running or queued.
 */
 2249static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 2250		struct nvdimm *nvdimm, unsigned int cmd)
 2251{
 2252	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
 2253
 2254	if (nvdimm)
 2255		return 0;
 2256	if (cmd != ND_CMD_ARS_START)
 2257		return 0;
 2258
 2259	/*
 2260	 * The kernel and userspace may race to initiate a scrub, but
 2261	 * the scrub thread is prepared to lose that initial race. It
 2262	 * just needs guarantees that any ars it initiates are not
 2263	 * interrupted by any intervening start requests from userspace.
 2264	 */
 2265	if (work_busy(&acpi_desc->work))
 2266		return -EBUSY;
 2267
 2268	return 0;
 2269}
2270
2271void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2272{
2273 struct nvdimm_bus_descriptor *nd_desc;
1913 2274
1914 dev_set_drvdata(dev, acpi_desc); 2275 dev_set_drvdata(dev, acpi_desc);
1915 acpi_desc->dev = dev; 2276 acpi_desc->dev = dev;
@@ -1917,14 +2278,10 @@ static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
1917 nd_desc = &acpi_desc->nd_desc; 2278 nd_desc = &acpi_desc->nd_desc;
1918 nd_desc->provider_name = "ACPI.NFIT"; 2279 nd_desc->provider_name = "ACPI.NFIT";
1919 nd_desc->ndctl = acpi_nfit_ctl; 2280 nd_desc->ndctl = acpi_nfit_ctl;
2281 nd_desc->flush_probe = acpi_nfit_flush_probe;
2282 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
1920 nd_desc->attr_groups = acpi_nfit_attribute_groups; 2283 nd_desc->attr_groups = acpi_nfit_attribute_groups;
1921 2284
1922 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
1923 if (!acpi_desc->nvdimm_bus) {
1924 devm_kfree(dev, acpi_desc);
1925 return ERR_PTR(-ENXIO);
1926 }
1927
1928 INIT_LIST_HEAD(&acpi_desc->spa_maps); 2285 INIT_LIST_HEAD(&acpi_desc->spa_maps);
1929 INIT_LIST_HEAD(&acpi_desc->spas); 2286 INIT_LIST_HEAD(&acpi_desc->spas);
1930 INIT_LIST_HEAD(&acpi_desc->dcrs); 2287 INIT_LIST_HEAD(&acpi_desc->dcrs);
@@ -1935,9 +2292,9 @@ static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
1935 INIT_LIST_HEAD(&acpi_desc->dimms); 2292 INIT_LIST_HEAD(&acpi_desc->dimms);
1936 mutex_init(&acpi_desc->spa_map_mutex); 2293 mutex_init(&acpi_desc->spa_map_mutex);
1937 mutex_init(&acpi_desc->init_mutex); 2294 mutex_init(&acpi_desc->init_mutex);
1938 2295 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
1939 return acpi_desc;
1940} 2296}
2297EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
1941 2298
1942static int acpi_nfit_add(struct acpi_device *adev) 2299static int acpi_nfit_add(struct acpi_device *adev)
1943{ 2300{
@@ -1956,12 +2313,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
1956 return 0; 2313 return 0;
1957 } 2314 }
1958 2315
1959 acpi_desc = acpi_nfit_desc_init(adev); 2316 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
1960 if (IS_ERR(acpi_desc)) { 2317 if (!acpi_desc)
1961 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 2318 return -ENOMEM;
1962 __func__, PTR_ERR(acpi_desc)); 2319 acpi_nfit_desc_init(acpi_desc, &adev->dev);
1963 return PTR_ERR(acpi_desc); 2320 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
1964 } 2321 if (!acpi_desc->nvdimm_bus)
2322 return -ENOMEM;
1965 2323
1966 /* 2324 /*
1967 * Save the acpi header for later and then skip it, 2325 * Save the acpi header for later and then skip it,
@@ -2000,6 +2358,8 @@ static int acpi_nfit_remove(struct acpi_device *adev)
2000{ 2358{
2001 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 2359 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
2002 2360
2361 acpi_desc->cancel = 1;
2362 flush_workqueue(nfit_wq);
2003 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 2363 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2004 return 0; 2364 return 0;
2005} 2365}
@@ -2024,12 +2384,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2024 } 2384 }
2025 2385
2026 if (!acpi_desc) { 2386 if (!acpi_desc) {
2027 acpi_desc = acpi_nfit_desc_init(adev); 2387 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2028 if (IS_ERR(acpi_desc)) { 2388 if (!acpi_desc)
2029 dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
2030 __func__, PTR_ERR(acpi_desc));
2031 goto out_unlock; 2389 goto out_unlock;
2032 } 2390 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2391 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2392 if (!acpi_desc->nvdimm_bus)
2393 goto out_unlock;
2394 } else {
2395 /*
2396 * Finish previous registration before considering new
2397 * regions.
2398 */
2399 flush_workqueue(nfit_wq);
2033 } 2400 }
2034 2401
2035 /* Evaluate _FIT */ 2402 /* Evaluate _FIT */
@@ -2097,12 +2464,17 @@ static __init int nfit_init(void)
2097 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); 2464 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
2098 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 2465 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
2099 2466
2467 nfit_wq = create_singlethread_workqueue("nfit");
2468 if (!nfit_wq)
2469 return -ENOMEM;
2470
2100 return acpi_bus_register_driver(&acpi_nfit_driver); 2471 return acpi_bus_register_driver(&acpi_nfit_driver);
2101} 2472}
2102 2473
2103static __exit void nfit_exit(void) 2474static __exit void nfit_exit(void)
2104{ 2475{
2105 acpi_bus_unregister_driver(&acpi_nfit_driver); 2476 acpi_bus_unregister_driver(&acpi_nfit_driver);
2477 destroy_workqueue(nfit_wq);
2106} 2478}
2107 2479
2108module_init(nfit_init); 2480module_init(nfit_init);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 3d549a383659..c75576b2d50e 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -14,6 +14,7 @@
14 */ 14 */
15#ifndef __NFIT_H__ 15#ifndef __NFIT_H__
16#define __NFIT_H__ 16#define __NFIT_H__
17#include <linux/workqueue.h>
17#include <linux/libnvdimm.h> 18#include <linux/libnvdimm.h>
18#include <linux/types.h> 19#include <linux/types.h>
19#include <linux/uuid.h> 20#include <linux/uuid.h>
@@ -40,15 +41,32 @@ enum nfit_uuids {
40 NFIT_UUID_MAX, 41 NFIT_UUID_MAX,
41}; 42};
42 43
44enum nfit_fic {
45 NFIT_FIC_BYTE = 0x101, /* byte-addressable energy backed */
46 NFIT_FIC_BLK = 0x201, /* block-addressable non-energy backed */
47 NFIT_FIC_BYTEN = 0x301, /* byte-addressable non-energy backed */
48};
49
43enum { 50enum {
44 ND_BLK_READ_FLUSH = 1, 51 NFIT_BLK_READ_FLUSH = 1,
45 ND_BLK_DCR_LATCH = 2, 52 NFIT_BLK_DCR_LATCH = 2,
53 NFIT_ARS_STATUS_DONE = 0,
54 NFIT_ARS_STATUS_BUSY = 1 << 16,
55 NFIT_ARS_STATUS_NONE = 2 << 16,
56 NFIT_ARS_STATUS_INTR = 3 << 16,
57 NFIT_ARS_START_BUSY = 6,
58 NFIT_ARS_CAP_NONE = 1,
59 NFIT_ARS_F_OVERFLOW = 1,
60 NFIT_ARS_TIMEOUT = 90,
46}; 61};
47 62
48struct nfit_spa { 63struct nfit_spa {
49 struct acpi_nfit_system_address *spa; 64 struct acpi_nfit_system_address *spa;
50 struct list_head list; 65 struct list_head list;
51 int is_registered; 66 struct nd_region *nd_region;
67 unsigned int ars_done:1;
68 u32 clear_err_unit;
69 u32 max_ars;
52}; 70};
53 71
54struct nfit_dcr { 72struct nfit_dcr {
@@ -110,6 +128,10 @@ struct acpi_nfit_desc {
110 struct list_head idts; 128 struct list_head idts;
111 struct nvdimm_bus *nvdimm_bus; 129 struct nvdimm_bus *nvdimm_bus;
112 struct device *dev; 130 struct device *dev;
131 struct nd_cmd_ars_status *ars_status;
132 size_t ars_status_size;
133 struct work_struct work;
134 unsigned int cancel:1;
113 unsigned long dimm_dsm_force_en; 135 unsigned long dimm_dsm_force_en;
114 unsigned long bus_dsm_force_en; 136 unsigned long bus_dsm_force_en;
115 int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 137 int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
@@ -182,5 +204,5 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
182 204
183const u8 *to_nfit_uuid(enum nfit_uuids id); 205const u8 *to_nfit_uuid(enum nfit_uuids id);
184int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz); 206int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz);
185extern const struct attribute_group *acpi_nfit_attribute_groups[]; 207void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
186#endif /* __NFIT_H__ */ 208#endif /* __NFIT_H__ */