about summary refs log tree commit diff stats
path: root/drivers/acpi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 20:22:07 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 20:38:16 -0400
commitf0c98ebc57c2d5e535bc4f9167f35650d2ba3c90 (patch)
treead584aa321c0a2dbdaa49e0754f6c9f233b79a48 /drivers/acpi
parentd94ba9e7d8d5c821d0442f13b30b0140c1109c38 (diff)
parent0606263f24f3d64960de742c55894190b5df903b (diff)
Merge tag 'libnvdimm-for-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:

 - Replace pcommit with ADR / directed-flushing. The pcommit instruction, which has not shipped on any product, is deprecated. Instead, the requirement is that platforms implement either ADR, or provide one or more flush addresses per nvdimm. ADR (Asynchronous DRAM Refresh) flushes data in posted write buffers to the memory controller on a power-fail event. Flush addresses are defined in ACPI 6.x as an NVDIMM Firmware Interface Table (NFIT) sub-structure: "Flush Hint Address Structure". A flush hint is an mmio address that when written and fenced assures that all previous posted writes targeting a given dimm have been flushed to media.

 - On-demand ARS (address range scrub). Linux uses the results of the ACPI ARS commands to track bad blocks in pmem devices. When latent errors are detected we re-scrub the media to refresh the bad block list, userspace can also request a re-scrub at any time.

 - Support for the Microsoft DSM (device specific method) command format.

 - Support for EDK2/OVMF virtual disk device memory ranges.

 - Various fixes and cleanups across the subsystem.
* tag 'libnvdimm-for-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (41 commits)
  libnvdimm-btt: Delete an unnecessary check before the function call "__nd_device_register"
  nfit: do an ARS scrub on hitting a latent media error
  nfit: move to nfit/ sub-directory
  nfit, libnvdimm: allow an ARS scrub to be triggered on demand
  libnvdimm: register nvdimm_bus devices with an nd_bus driver
  pmem: clarify a debug print in pmem_clear_poison
  x86/insn: remove pcommit
  Revert "KVM: x86: add pcommit support"
  nfit, tools/testing/nvdimm/: unify shutdown paths
  libnvdimm: move ->module to struct nvdimm_bus_descriptor
  nfit: cleanup acpi_nfit_init calling convention
  nfit: fix _FIT evaluation memory leak + use after free
  tools/testing/nvdimm: add manufacturing_{date|location} dimm properties
  tools/testing/nvdimm: add virtual ramdisk range
  acpi, nfit: treat virtual ramdisk SPA as pmem region
  pmem: kill __pmem address space
  pmem: kill wmb_pmem()
  libnvdimm, pmem: use nvdimm_flush() for namespace I/O writes
  fs/dax: remove wmb_pmem()
  libnvdimm, pmem: flush posted-write queues on shutdown
  ...
Diffstat (limited to 'drivers/acpi')
-rw-r--r--drivers/acpi/Kconfig27
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/nfit/Kconfig26
-rw-r--r--drivers/acpi/nfit/Makefile3
-rw-r--r--drivers/acpi/nfit/core.c (renamed from drivers/acpi/nfit.c)647
-rw-r--r--drivers/acpi/nfit/mce.c89
-rw-r--r--drivers/acpi/nfit/nfit.h (renamed from drivers/acpi/nfit.h)60
7 files changed, 511 insertions, 343 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index acad70a0bb0d..aebd944bdaa1 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -454,32 +454,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
454 454
455 If you are unsure what to do, do not enable this option. 455 If you are unsure what to do, do not enable this option.
456 456
457config ACPI_NFIT 457source "drivers/acpi/nfit/Kconfig"
458 tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
459 depends on PHYS_ADDR_T_64BIT
460 depends on BLK_DEV
461 depends on ARCH_HAS_MMIO_FLUSH
462 select LIBNVDIMM
463 help
464 Infrastructure to probe ACPI 6 compliant platforms for
465 NVDIMMs (NFIT) and register a libnvdimm device tree. In
466 addition to storage devices this also enables libnvdimm to pass
467 ACPI._DSM messages for platform/dimm configuration.
468
469 To compile this driver as a module, choose M here:
470 the module will be called nfit.
471
472config ACPI_NFIT_DEBUG
473 bool "NFIT DSM debug"
474 depends on ACPI_NFIT
475 depends on DYNAMIC_DEBUG
476 default n
477 help
478 Enabling this option causes the nfit driver to dump the
479 input and output buffers of _DSM operations on the ACPI0012
480 device and its children. This can be very verbose, so leave
481 it disabled unless you are debugging a hardware / firmware
482 issue.
483 458
484source "drivers/acpi/apei/Kconfig" 459source "drivers/acpi/apei/Kconfig"
485source "drivers/acpi/dptf/Kconfig" 460source "drivers/acpi/dptf/Kconfig"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 88f54f03e3d2..35a6ccbe3025 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -69,7 +69,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
69obj-$(CONFIG_ACPI_PROCESSOR) += processor.o 69obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
70obj-$(CONFIG_ACPI) += container.o 70obj-$(CONFIG_ACPI) += container.o
71obj-$(CONFIG_ACPI_THERMAL) += thermal.o 71obj-$(CONFIG_ACPI_THERMAL) += thermal.o
72obj-$(CONFIG_ACPI_NFIT) += nfit.o 72obj-$(CONFIG_ACPI_NFIT) += nfit/
73obj-$(CONFIG_ACPI) += acpi_memhotplug.o 73obj-$(CONFIG_ACPI) += acpi_memhotplug.o
74obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o 74obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
75obj-$(CONFIG_ACPI_BATTERY) += battery.o 75obj-$(CONFIG_ACPI_BATTERY) += battery.o
diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig
new file mode 100644
index 000000000000..dd0d53c52552
--- /dev/null
+++ b/drivers/acpi/nfit/Kconfig
@@ -0,0 +1,26 @@
1config ACPI_NFIT
2 tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
3 depends on PHYS_ADDR_T_64BIT
4 depends on BLK_DEV
5 depends on ARCH_HAS_MMIO_FLUSH
6 select LIBNVDIMM
7 help
8 Infrastructure to probe ACPI 6 compliant platforms for
9 NVDIMMs (NFIT) and register a libnvdimm device tree. In
10 addition to storage devices this also enables libnvdimm to pass
11 ACPI._DSM messages for platform/dimm configuration.
12
13 To compile this driver as a module, choose M here:
14 the module will be called nfit.
15
16config ACPI_NFIT_DEBUG
17 bool "NFIT DSM debug"
18 depends on ACPI_NFIT
19 depends on DYNAMIC_DEBUG
20 default n
21 help
22 Enabling this option causes the nfit driver to dump the
23 input and output buffers of _DSM operations on the ACPI0012
24 device and its children. This can be very verbose, so leave
25 it disabled unless you are debugging a hardware / firmware
26 issue.
diff --git a/drivers/acpi/nfit/Makefile b/drivers/acpi/nfit/Makefile
new file mode 100644
index 000000000000..a407e769f103
--- /dev/null
+++ b/drivers/acpi/nfit/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ACPI_NFIT) := nfit.o
2nfit-y := core.o
3nfit-$(CONFIG_X86_MCE) += mce.o
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit/core.c
index 1f0e06065ae6..8c234dd9b8bc 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit/core.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/ndctl.h> 17#include <linux/ndctl.h>
18#include <linux/sysfs.h>
18#include <linux/delay.h> 19#include <linux/delay.h>
19#include <linux/list.h> 20#include <linux/list.h>
20#include <linux/acpi.h> 21#include <linux/acpi.h>
@@ -50,6 +51,9 @@ module_param(disable_vendor_specific, bool, S_IRUGO);
50MODULE_PARM_DESC(disable_vendor_specific, 51MODULE_PARM_DESC(disable_vendor_specific,
51 "Limit commands to the publicly specified set\n"); 52 "Limit commands to the publicly specified set\n");
52 53
54LIST_HEAD(acpi_descs);
55DEFINE_MUTEX(acpi_desc_lock);
56
53static struct workqueue_struct *nfit_wq; 57static struct workqueue_struct *nfit_wq;
54 58
55struct nfit_table_prev { 59struct nfit_table_prev {
@@ -360,7 +364,7 @@ static const char *spa_type_name(u16 type)
360 return to_name[type]; 364 return to_name[type];
361} 365}
362 366
363static int nfit_spa_type(struct acpi_nfit_system_address *spa) 367int nfit_spa_type(struct acpi_nfit_system_address *spa)
364{ 368{
365 int i; 369 int i;
366 370
@@ -374,22 +378,25 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
374 struct nfit_table_prev *prev, 378 struct nfit_table_prev *prev,
375 struct acpi_nfit_system_address *spa) 379 struct acpi_nfit_system_address *spa)
376{ 380{
377 size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
378 struct device *dev = acpi_desc->dev; 381 struct device *dev = acpi_desc->dev;
379 struct nfit_spa *nfit_spa; 382 struct nfit_spa *nfit_spa;
380 383
384 if (spa->header.length != sizeof(*spa))
385 return false;
386
381 list_for_each_entry(nfit_spa, &prev->spas, list) { 387 list_for_each_entry(nfit_spa, &prev->spas, list) {
382 if (memcmp(nfit_spa->spa, spa, length) == 0) { 388 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
383 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 389 list_move_tail(&nfit_spa->list, &acpi_desc->spas);
384 return true; 390 return true;
385 } 391 }
386 } 392 }
387 393
388 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL); 394 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
395 GFP_KERNEL);
389 if (!nfit_spa) 396 if (!nfit_spa)
390 return false; 397 return false;
391 INIT_LIST_HEAD(&nfit_spa->list); 398 INIT_LIST_HEAD(&nfit_spa->list);
392 nfit_spa->spa = spa; 399 memcpy(nfit_spa->spa, spa, sizeof(*spa));
393 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 400 list_add_tail(&nfit_spa->list, &acpi_desc->spas);
394 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, 401 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
395 spa->range_index, 402 spa->range_index,
@@ -401,21 +408,24 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
401 struct nfit_table_prev *prev, 408 struct nfit_table_prev *prev,
402 struct acpi_nfit_memory_map *memdev) 409 struct acpi_nfit_memory_map *memdev)
403{ 410{
404 size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
405 struct device *dev = acpi_desc->dev; 411 struct device *dev = acpi_desc->dev;
406 struct nfit_memdev *nfit_memdev; 412 struct nfit_memdev *nfit_memdev;
407 413
414 if (memdev->header.length != sizeof(*memdev))
415 return false;
416
408 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 417 list_for_each_entry(nfit_memdev, &prev->memdevs, list)
409 if (memcmp(nfit_memdev->memdev, memdev, length) == 0) { 418 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
410 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 419 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
411 return true; 420 return true;
412 } 421 }
413 422
414 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL); 423 nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
424 GFP_KERNEL);
415 if (!nfit_memdev) 425 if (!nfit_memdev)
416 return false; 426 return false;
417 INIT_LIST_HEAD(&nfit_memdev->list); 427 INIT_LIST_HEAD(&nfit_memdev->list);
418 nfit_memdev->memdev = memdev; 428 memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
419 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 429 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
420 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", 430 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
421 __func__, memdev->device_handle, memdev->range_index, 431 __func__, memdev->device_handle, memdev->range_index,
@@ -423,25 +433,42 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
423 return true; 433 return true;
424} 434}
425 435
436/*
437 * An implementation may provide a truncated control region if no block windows
438 * are defined.
439 */
440static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
441{
442 if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
443 window_size))
444 return 0;
445 if (dcr->windows)
446 return sizeof(*dcr);
447 return offsetof(struct acpi_nfit_control_region, window_size);
448}
449
426static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 450static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
427 struct nfit_table_prev *prev, 451 struct nfit_table_prev *prev,
428 struct acpi_nfit_control_region *dcr) 452 struct acpi_nfit_control_region *dcr)
429{ 453{
430 size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
431 struct device *dev = acpi_desc->dev; 454 struct device *dev = acpi_desc->dev;
432 struct nfit_dcr *nfit_dcr; 455 struct nfit_dcr *nfit_dcr;
433 456
457 if (!sizeof_dcr(dcr))
458 return false;
459
434 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 460 list_for_each_entry(nfit_dcr, &prev->dcrs, list)
435 if (memcmp(nfit_dcr->dcr, dcr, length) == 0) { 461 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
436 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 462 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
437 return true; 463 return true;
438 } 464 }
439 465
440 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL); 466 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
467 GFP_KERNEL);
441 if (!nfit_dcr) 468 if (!nfit_dcr)
442 return false; 469 return false;
443 INIT_LIST_HEAD(&nfit_dcr->list); 470 INIT_LIST_HEAD(&nfit_dcr->list);
444 nfit_dcr->dcr = dcr; 471 memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
445 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 472 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
446 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, 473 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
447 dcr->region_index, dcr->windows); 474 dcr->region_index, dcr->windows);
@@ -452,71 +479,102 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
452 struct nfit_table_prev *prev, 479 struct nfit_table_prev *prev,
453 struct acpi_nfit_data_region *bdw) 480 struct acpi_nfit_data_region *bdw)
454{ 481{
455 size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
456 struct device *dev = acpi_desc->dev; 482 struct device *dev = acpi_desc->dev;
457 struct nfit_bdw *nfit_bdw; 483 struct nfit_bdw *nfit_bdw;
458 484
485 if (bdw->header.length != sizeof(*bdw))
486 return false;
459 list_for_each_entry(nfit_bdw, &prev->bdws, list) 487 list_for_each_entry(nfit_bdw, &prev->bdws, list)
460 if (memcmp(nfit_bdw->bdw, bdw, length) == 0) { 488 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
461 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 489 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
462 return true; 490 return true;
463 } 491 }
464 492
465 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL); 493 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
494 GFP_KERNEL);
466 if (!nfit_bdw) 495 if (!nfit_bdw)
467 return false; 496 return false;
468 INIT_LIST_HEAD(&nfit_bdw->list); 497 INIT_LIST_HEAD(&nfit_bdw->list);
469 nfit_bdw->bdw = bdw; 498 memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
470 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 499 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
471 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, 500 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
472 bdw->region_index, bdw->windows); 501 bdw->region_index, bdw->windows);
473 return true; 502 return true;
474} 503}
475 504
505static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
506{
507 if (idt->header.length < sizeof(*idt))
508 return 0;
509 return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
510}
511
476static bool add_idt(struct acpi_nfit_desc *acpi_desc, 512static bool add_idt(struct acpi_nfit_desc *acpi_desc,
477 struct nfit_table_prev *prev, 513 struct nfit_table_prev *prev,
478 struct acpi_nfit_interleave *idt) 514 struct acpi_nfit_interleave *idt)
479{ 515{
480 size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
481 struct device *dev = acpi_desc->dev; 516 struct device *dev = acpi_desc->dev;
482 struct nfit_idt *nfit_idt; 517 struct nfit_idt *nfit_idt;
483 518
484 list_for_each_entry(nfit_idt, &prev->idts, list) 519 if (!sizeof_idt(idt))
485 if (memcmp(nfit_idt->idt, idt, length) == 0) { 520 return false;
521
522 list_for_each_entry(nfit_idt, &prev->idts, list) {
523 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
524 continue;
525
526 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
486 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 527 list_move_tail(&nfit_idt->list, &acpi_desc->idts);
487 return true; 528 return true;
488 } 529 }
530 }
489 531
490 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL); 532 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
533 GFP_KERNEL);
491 if (!nfit_idt) 534 if (!nfit_idt)
492 return false; 535 return false;
493 INIT_LIST_HEAD(&nfit_idt->list); 536 INIT_LIST_HEAD(&nfit_idt->list);
494 nfit_idt->idt = idt; 537 memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
495 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 538 list_add_tail(&nfit_idt->list, &acpi_desc->idts);
496 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, 539 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
497 idt->interleave_index, idt->line_count); 540 idt->interleave_index, idt->line_count);
498 return true; 541 return true;
499} 542}
500 543
544static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
545{
546 if (flush->header.length < sizeof(*flush))
547 return 0;
548 return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
549}
550
501static bool add_flush(struct acpi_nfit_desc *acpi_desc, 551static bool add_flush(struct acpi_nfit_desc *acpi_desc,
502 struct nfit_table_prev *prev, 552 struct nfit_table_prev *prev,
503 struct acpi_nfit_flush_address *flush) 553 struct acpi_nfit_flush_address *flush)
504{ 554{
505 size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
506 struct device *dev = acpi_desc->dev; 555 struct device *dev = acpi_desc->dev;
507 struct nfit_flush *nfit_flush; 556 struct nfit_flush *nfit_flush;
508 557
509 list_for_each_entry(nfit_flush, &prev->flushes, list) 558 if (!sizeof_flush(flush))
510 if (memcmp(nfit_flush->flush, flush, length) == 0) { 559 return false;
560
561 list_for_each_entry(nfit_flush, &prev->flushes, list) {
562 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
563 continue;
564
565 if (memcmp(nfit_flush->flush, flush,
566 sizeof_flush(flush)) == 0) {
511 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 567 list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
512 return true; 568 return true;
513 } 569 }
570 }
514 571
515 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL); 572 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
573 + sizeof_flush(flush), GFP_KERNEL);
516 if (!nfit_flush) 574 if (!nfit_flush)
517 return false; 575 return false;
518 INIT_LIST_HEAD(&nfit_flush->list); 576 INIT_LIST_HEAD(&nfit_flush->list);
519 nfit_flush->flush = flush; 577 memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
520 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 578 list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
521 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, 579 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
522 flush->device_handle, flush->hint_count); 580 flush->device_handle, flush->hint_count);
@@ -614,7 +672,6 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
614{ 672{
615 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 673 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
616 struct nfit_memdev *nfit_memdev; 674 struct nfit_memdev *nfit_memdev;
617 struct nfit_flush *nfit_flush;
618 struct nfit_bdw *nfit_bdw; 675 struct nfit_bdw *nfit_bdw;
619 struct nfit_idt *nfit_idt; 676 struct nfit_idt *nfit_idt;
620 u16 idt_idx, range_index; 677 u16 idt_idx, range_index;
@@ -647,14 +704,6 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
647 nfit_mem->idt_bdw = nfit_idt->idt; 704 nfit_mem->idt_bdw = nfit_idt->idt;
648 break; 705 break;
649 } 706 }
650
651 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
652 if (nfit_flush->flush->device_handle !=
653 nfit_memdev->memdev->device_handle)
654 continue;
655 nfit_mem->nfit_flush = nfit_flush;
656 break;
657 }
658 break; 707 break;
659 } 708 }
660} 709}
@@ -675,6 +724,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
675 } 724 }
676 725
677 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 726 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
727 struct nfit_flush *nfit_flush;
678 struct nfit_dcr *nfit_dcr; 728 struct nfit_dcr *nfit_dcr;
679 u32 device_handle; 729 u32 device_handle;
680 u16 dcr; 730 u16 dcr;
@@ -721,6 +771,28 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
721 break; 771 break;
722 } 772 }
723 773
774 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
775 struct acpi_nfit_flush_address *flush;
776 u16 i;
777
778 if (nfit_flush->flush->device_handle != device_handle)
779 continue;
780 nfit_mem->nfit_flush = nfit_flush;
781 flush = nfit_flush->flush;
782 nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
783 flush->hint_count
784 * sizeof(struct resource), GFP_KERNEL);
785 if (!nfit_mem->flush_wpq)
786 return -ENOMEM;
787 for (i = 0; i < flush->hint_count; i++) {
788 struct resource *res = &nfit_mem->flush_wpq[i];
789
790 res->start = flush->hint_address[i];
791 res->end = res->start + 8 - 1;
792 }
793 break;
794 }
795
724 if (dcr && !nfit_mem->dcr) { 796 if (dcr && !nfit_mem->dcr) {
725 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", 797 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
726 spa->range_index, dcr); 798 spa->range_index, dcr);
@@ -806,14 +878,85 @@ static ssize_t revision_show(struct device *dev,
806} 878}
807static DEVICE_ATTR_RO(revision); 879static DEVICE_ATTR_RO(revision);
808 880
881/*
882 * This shows the number of full Address Range Scrubs that have been
883 * completed since driver load time. Userspace can wait on this using
884 * select/poll etc. A '+' at the end indicates an ARS is in progress
885 */
886static ssize_t scrub_show(struct device *dev,
887 struct device_attribute *attr, char *buf)
888{
889 struct nvdimm_bus_descriptor *nd_desc;
890 ssize_t rc = -ENXIO;
891
892 device_lock(dev);
893 nd_desc = dev_get_drvdata(dev);
894 if (nd_desc) {
895 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
896
897 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
898 (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
899 }
900 device_unlock(dev);
901 return rc;
902}
903
904static ssize_t scrub_store(struct device *dev,
905 struct device_attribute *attr, const char *buf, size_t size)
906{
907 struct nvdimm_bus_descriptor *nd_desc;
908 ssize_t rc;
909 long val;
910
911 rc = kstrtol(buf, 0, &val);
912 if (rc)
913 return rc;
914 if (val != 1)
915 return -EINVAL;
916
917 device_lock(dev);
918 nd_desc = dev_get_drvdata(dev);
919 if (nd_desc) {
920 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
921
922 rc = acpi_nfit_ars_rescan(acpi_desc);
923 }
924 device_unlock(dev);
925 if (rc)
926 return rc;
927 return size;
928}
929static DEVICE_ATTR_RW(scrub);
930
931static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
932{
933 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
934 const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
935 | 1 << ND_CMD_ARS_STATUS;
936
937 return (nd_desc->cmd_mask & mask) == mask;
938}
939
940static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
941{
942 struct device *dev = container_of(kobj, struct device, kobj);
943 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
944
945 if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
946 return 0;
947 return a->mode;
948}
949
809static struct attribute *acpi_nfit_attributes[] = { 950static struct attribute *acpi_nfit_attributes[] = {
810 &dev_attr_revision.attr, 951 &dev_attr_revision.attr,
952 &dev_attr_scrub.attr,
811 NULL, 953 NULL,
812}; 954};
813 955
814static struct attribute_group acpi_nfit_attribute_group = { 956static struct attribute_group acpi_nfit_attribute_group = {
815 .name = "nfit", 957 .name = "nfit",
816 .attrs = acpi_nfit_attributes, 958 .attrs = acpi_nfit_attributes,
959 .is_visible = nfit_visible,
817}; 960};
818 961
819static const struct attribute_group *acpi_nfit_attribute_groups[] = { 962static const struct attribute_group *acpi_nfit_attribute_groups[] = {
@@ -1130,11 +1273,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1130 } 1273 }
1131 1274
1132 /* 1275 /*
1133 * Until standardization materializes we need to consider up to 3 1276 * Until standardization materializes we need to consider 4
1134 * different command sets. Note, that checking for function0 (bit0) 1277 * different command sets. Note, that checking for function0 (bit0)
1135 * tells us if any commands are reachable through this uuid. 1278 * tells us if any commands are reachable through this uuid.
1136 */ 1279 */
1137 for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++) 1280 for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
1138 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1281 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
1139 break; 1282 break;
1140 1283
@@ -1144,12 +1287,14 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1144 dsm_mask = 0x3fe; 1287 dsm_mask = 0x3fe;
1145 if (disable_vendor_specific) 1288 if (disable_vendor_specific)
1146 dsm_mask &= ~(1 << ND_CMD_VENDOR); 1289 dsm_mask &= ~(1 << ND_CMD_VENDOR);
1147 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) 1290 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1148 dsm_mask = 0x1c3c76; 1291 dsm_mask = 0x1c3c76;
1149 else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { 1292 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1150 dsm_mask = 0x1fe; 1293 dsm_mask = 0x1fe;
1151 if (disable_vendor_specific) 1294 if (disable_vendor_specific)
1152 dsm_mask &= ~(1 << 8); 1295 dsm_mask &= ~(1 << 8);
1296 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1297 dsm_mask = 0xffffffff;
1153 } else { 1298 } else {
1154 dev_dbg(dev, "unknown dimm command family\n"); 1299 dev_dbg(dev, "unknown dimm command family\n");
1155 nfit_mem->family = -1; 1300 nfit_mem->family = -1;
@@ -1171,6 +1316,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1171 int dimm_count = 0; 1316 int dimm_count = 0;
1172 1317
1173 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 1318 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1319 struct acpi_nfit_flush_address *flush;
1174 unsigned long flags = 0, cmd_mask; 1320 unsigned long flags = 0, cmd_mask;
1175 struct nvdimm *nvdimm; 1321 struct nvdimm *nvdimm;
1176 u32 device_handle; 1322 u32 device_handle;
@@ -1204,9 +1350,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1204 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) 1350 if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1205 cmd_mask |= nfit_mem->dsm_mask; 1351 cmd_mask |= nfit_mem->dsm_mask;
1206 1352
1353 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
1354 : NULL;
1207 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 1355 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
1208 acpi_nfit_dimm_attribute_groups, 1356 acpi_nfit_dimm_attribute_groups,
1209 flags, cmd_mask); 1357 flags, cmd_mask, flush ? flush->hint_count : 0,
1358 nfit_mem->flush_wpq);
1210 if (!nvdimm) 1359 if (!nvdimm)
1211 return -ENOMEM; 1360 return -ENOMEM;
1212 1361
@@ -1374,24 +1523,6 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
1374 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 1523 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
1375} 1524}
1376 1525
1377static void wmb_blk(struct nfit_blk *nfit_blk)
1378{
1379
1380 if (nfit_blk->nvdimm_flush) {
1381 /*
1382 * The first wmb() is needed to 'sfence' all previous writes
1383 * such that they are architecturally visible for the platform
1384 * buffer flush. Note that we've already arranged for pmem
1385 * writes to avoid the cache via arch_memcpy_to_pmem(). The
1386 * final wmb() ensures ordering for the NVDIMM flush write.
1387 */
1388 wmb();
1389 writeq(1, nfit_blk->nvdimm_flush);
1390 wmb();
1391 } else
1392 wmb_pmem();
1393}
1394
1395static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 1526static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
1396{ 1527{
1397 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1528 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1426,7 +1557,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1426 offset = to_interleave_offset(offset, mmio); 1557 offset = to_interleave_offset(offset, mmio);
1427 1558
1428 writeq(cmd, mmio->addr.base + offset); 1559 writeq(cmd, mmio->addr.base + offset);
1429 wmb_blk(nfit_blk); 1560 nvdimm_flush(nfit_blk->nd_region);
1430 1561
1431 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) 1562 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
1432 readq(mmio->addr.base + offset); 1563 readq(mmio->addr.base + offset);
@@ -1477,7 +1608,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1477 } 1608 }
1478 1609
1479 if (rw) 1610 if (rw)
1480 wmb_blk(nfit_blk); 1611 nvdimm_flush(nfit_blk->nd_region);
1481 1612
1482 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 1613 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
1483 return rc; 1614 return rc;
@@ -1509,125 +1640,6 @@ static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
1509 return rc; 1640 return rc;
1510} 1641}
1511 1642
1512static void nfit_spa_mapping_release(struct kref *kref)
1513{
1514 struct nfit_spa_mapping *spa_map = to_spa_map(kref);
1515 struct acpi_nfit_system_address *spa = spa_map->spa;
1516 struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;
1517
1518 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1519 dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
1520 if (spa_map->type == SPA_MAP_APERTURE)
1521 memunmap((void __force *)spa_map->addr.aperture);
1522 else
1523 iounmap(spa_map->addr.base);
1524 release_mem_region(spa->address, spa->length);
1525 list_del(&spa_map->list);
1526 kfree(spa_map);
1527}
1528
1529static struct nfit_spa_mapping *find_spa_mapping(
1530 struct acpi_nfit_desc *acpi_desc,
1531 struct acpi_nfit_system_address *spa)
1532{
1533 struct nfit_spa_mapping *spa_map;
1534
1535 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1536 list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
1537 if (spa_map->spa == spa)
1538 return spa_map;
1539
1540 return NULL;
1541}
1542
1543static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
1544 struct acpi_nfit_system_address *spa)
1545{
1546 struct nfit_spa_mapping *spa_map;
1547
1548 mutex_lock(&acpi_desc->spa_map_mutex);
1549 spa_map = find_spa_mapping(acpi_desc, spa);
1550
1551 if (spa_map)
1552 kref_put(&spa_map->kref, nfit_spa_mapping_release);
1553 mutex_unlock(&acpi_desc->spa_map_mutex);
1554}
1555
1556static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1557 struct acpi_nfit_system_address *spa, enum spa_map_type type)
1558{
1559 resource_size_t start = spa->address;
1560 resource_size_t n = spa->length;
1561 struct nfit_spa_mapping *spa_map;
1562 struct resource *res;
1563
1564 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1565
1566 spa_map = find_spa_mapping(acpi_desc, spa);
1567 if (spa_map) {
1568 kref_get(&spa_map->kref);
1569 return spa_map->addr.base;
1570 }
1571
1572 spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
1573 if (!spa_map)
1574 return NULL;
1575
1576 INIT_LIST_HEAD(&spa_map->list);
1577 spa_map->spa = spa;
1578 kref_init(&spa_map->kref);
1579 spa_map->acpi_desc = acpi_desc;
1580
1581 res = request_mem_region(start, n, dev_name(acpi_desc->dev));
1582 if (!res)
1583 goto err_mem;
1584
1585 spa_map->type = type;
1586 if (type == SPA_MAP_APERTURE)
1587 spa_map->addr.aperture = (void __pmem *)memremap(start, n,
1588 ARCH_MEMREMAP_PMEM);
1589 else
1590 spa_map->addr.base = ioremap_nocache(start, n);
1591
1592
1593 if (!spa_map->addr.base)
1594 goto err_map;
1595
1596 list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
1597 return spa_map->addr.base;
1598
1599 err_map:
1600 release_mem_region(start, n);
1601 err_mem:
1602 kfree(spa_map);
1603 return NULL;
1604}
1605
1606/**
1607 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
1608 * @nvdimm_bus: NFIT-bus that provided the spa table entry
1609 * @nfit_spa: spa table to map
1610 * @type: aperture or control region
1611 *
1612 * In the case where block-data-window apertures and
1613 * dimm-control-regions are interleaved they will end up sharing a
1614 * single request_mem_region() + ioremap() for the address range. In
1615 * the style of devm nfit_spa_map() mappings are automatically dropped
1616 * when all region devices referencing the same mapping are disabled /
1617 * unbound.
1618 */
1619static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1620 struct acpi_nfit_system_address *spa, enum spa_map_type type)
1621{
1622 void __iomem *iomem;
1623
1624 mutex_lock(&acpi_desc->spa_map_mutex);
1625 iomem = __nfit_spa_map(acpi_desc, spa, type);
1626 mutex_unlock(&acpi_desc->spa_map_mutex);
1627
1628 return iomem;
1629}
1630
1631static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 1643static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1632 struct acpi_nfit_interleave *idt, u16 interleave_ways) 1644 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1633{ 1645{
@@ -1669,9 +1681,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1669 struct device *dev) 1681 struct device *dev)
1670{ 1682{
1671 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1683 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1672 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1673 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1684 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1674 struct nfit_flush *nfit_flush;
1675 struct nfit_blk_mmio *mmio; 1685 struct nfit_blk_mmio *mmio;
1676 struct nfit_blk *nfit_blk; 1686 struct nfit_blk *nfit_blk;
1677 struct nfit_mem *nfit_mem; 1687 struct nfit_mem *nfit_mem;
@@ -1697,8 +1707,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1697 /* map block aperture memory */ 1707 /* map block aperture memory */
1698 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1708 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1699 mmio = &nfit_blk->mmio[BDW]; 1709 mmio = &nfit_blk->mmio[BDW];
1700 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, 1710 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
1701 SPA_MAP_APERTURE); 1711 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
1702 if (!mmio->addr.base) { 1712 if (!mmio->addr.base) {
1703 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1713 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1704 nvdimm_name(nvdimm)); 1714 nvdimm_name(nvdimm));
@@ -1720,8 +1730,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1720 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1730 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1721 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1731 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1722 mmio = &nfit_blk->mmio[DCR]; 1732 mmio = &nfit_blk->mmio[DCR];
1723 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, 1733 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
1724 SPA_MAP_CONTROL); 1734 nfit_mem->spa_dcr->length);
1725 if (!mmio->addr.base) { 1735 if (!mmio->addr.base) {
1726 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1736 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1727 nvdimm_name(nvdimm)); 1737 nvdimm_name(nvdimm));
@@ -1746,15 +1756,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1746 return rc; 1756 return rc;
1747 } 1757 }
1748 1758
1749 nfit_flush = nfit_mem->nfit_flush; 1759 if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
1750 if (nfit_flush && nfit_flush->flush->hint_count != 0) {
1751 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
1752 nfit_flush->flush->hint_address[0], 8);
1753 if (!nfit_blk->nvdimm_flush)
1754 return -ENOMEM;
1755 }
1756
1757 if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
1758 dev_warn(dev, "unable to guarantee persistence of writes\n"); 1760 dev_warn(dev, "unable to guarantee persistence of writes\n");
1759 1761
1760 if (mmio->line_size == 0) 1762 if (mmio->line_size == 0)
@@ -1773,29 +1775,6 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1773 return 0; 1775 return 0;
1774} 1776}
1775 1777
1776static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
1777 struct device *dev)
1778{
1779 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1780 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1781 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1782 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1783 int i;
1784
1785 if (!nfit_blk)
1786 return; /* never enabled */
1787
1788 /* auto-free BLK spa mappings */
1789 for (i = 0; i < 2; i++) {
1790 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
1791
1792 if (mmio->addr.base)
1793 nfit_spa_unmap(acpi_desc, mmio->spa);
1794 }
1795 nd_blk_region_set_provider_data(ndbr, NULL);
1796 /* devm will free nfit_blk */
1797}
1798
1799static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 1778static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1800 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 1779 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
1801{ 1780{
@@ -1919,11 +1898,11 @@ static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
1919 if (ret) 1898 if (ret)
1920 return ret; 1899 return ret;
1921 1900
1922 ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res); 1901 ret = devm_add_action_or_reset(acpi_desc->dev,
1923 if (ret) { 1902 acpi_nfit_remove_resource,
1924 remove_resource(res); 1903 res);
1904 if (ret)
1925 return ret; 1905 return ret;
1926 }
1927 1906
1928 return 0; 1907 return 0;
1929} 1908}
@@ -1969,7 +1948,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1969 ndr_desc->num_mappings = blk_valid; 1948 ndr_desc->num_mappings = blk_valid;
1970 ndbr_desc = to_blk_region_desc(ndr_desc); 1949 ndbr_desc = to_blk_region_desc(ndr_desc);
1971 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1950 ndbr_desc->enable = acpi_nfit_blk_region_enable;
1972 ndbr_desc->disable = acpi_nfit_blk_region_disable;
1973 ndbr_desc->do_io = acpi_desc->blk_do_io; 1951 ndbr_desc->do_io = acpi_desc->blk_do_io;
1974 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 1952 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
1975 ndr_desc); 1953 ndr_desc);
@@ -1981,6 +1959,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1981 return 0; 1959 return 0;
1982} 1960}
1983 1961
1962static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
1963{
1964 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
1965 nfit_spa_type(spa) == NFIT_SPA_VCD ||
1966 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
1967 nfit_spa_type(spa) == NFIT_SPA_PCD);
1968}
1969
1984static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 1970static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
1985 struct nfit_spa *nfit_spa) 1971 struct nfit_spa *nfit_spa)
1986{ 1972{
@@ -1996,7 +1982,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
1996 if (nfit_spa->nd_region) 1982 if (nfit_spa->nd_region)
1997 return 0; 1983 return 0;
1998 1984
1999 if (spa->range_index == 0) { 1985 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2000 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", 1986 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2001 __func__); 1987 __func__);
2002 return 0; 1988 return 0;
@@ -2060,6 +2046,11 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2060 ndr_desc); 2046 ndr_desc);
2061 if (!nfit_spa->nd_region) 2047 if (!nfit_spa->nd_region)
2062 rc = -ENOMEM; 2048 rc = -ENOMEM;
2049 } else if (nfit_spa_is_virtual(spa)) {
2050 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2051 ndr_desc);
2052 if (!nfit_spa->nd_region)
2053 rc = -ENOMEM;
2063 } 2054 }
2064 2055
2065 out: 2056 out:
@@ -2139,7 +2130,7 @@ static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
2139 unsigned int tmo = scrub_timeout; 2130 unsigned int tmo = scrub_timeout;
2140 int rc; 2131 int rc;
2141 2132
2142 if (nfit_spa->ars_done || !nfit_spa->nd_region) 2133 if (!nfit_spa->ars_required || !nfit_spa->nd_region)
2143 return; 2134 return;
2144 2135
2145 rc = ars_start(acpi_desc, nfit_spa); 2136 rc = ars_start(acpi_desc, nfit_spa);
@@ -2228,7 +2219,9 @@ static void acpi_nfit_scrub(struct work_struct *work)
2228 * firmware initiated scrubs to complete and then we go search for the 2219 * firmware initiated scrubs to complete and then we go search for the
2229 * affected spa regions to mark them scanned. In the second phase we 2220 * affected spa regions to mark them scanned. In the second phase we
2230 * initiate a directed scrub for every range that was not scrubbed in 2221 * initiate a directed scrub for every range that was not scrubbed in
2231 * phase 1. 2222 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2223 * the first phase, but really only care about running phase 2, where
2224 * regions can be notified of new poison.
2232 */ 2225 */
2233 2226
2234 /* process platform firmware initiated scrubs */ 2227 /* process platform firmware initiated scrubs */
@@ -2331,14 +2324,17 @@ static void acpi_nfit_scrub(struct work_struct *work)
2331 * Flag all the ranges that still need scrubbing, but 2324 * Flag all the ranges that still need scrubbing, but
2332 * register them now to make data available. 2325 * register them now to make data available.
2333 */ 2326 */
2334 if (nfit_spa->nd_region) 2327 if (!nfit_spa->nd_region) {
2335 nfit_spa->ars_done = 1; 2328 nfit_spa->ars_required = 1;
2336 else
2337 acpi_nfit_register_region(acpi_desc, nfit_spa); 2329 acpi_nfit_register_region(acpi_desc, nfit_spa);
2330 }
2338 } 2331 }
2339 2332
2340 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) 2333 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2341 acpi_nfit_async_scrub(acpi_desc, nfit_spa); 2334 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
2335 acpi_desc->scrub_count++;
2336 if (acpi_desc->scrub_count_state)
2337 sysfs_notify_dirent(acpi_desc->scrub_count_state);
2342 mutex_unlock(&acpi_desc->init_mutex); 2338 mutex_unlock(&acpi_desc->init_mutex);
2343} 2339}
2344 2340
@@ -2376,14 +2372,89 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2376 return 0; 2372 return 0;
2377} 2373}
2378 2374
2379int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) 2375static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
2376{
2377 struct device *dev = acpi_desc->dev;
2378 struct kernfs_node *nfit;
2379 struct device *bus_dev;
2380
2381 if (!ars_supported(acpi_desc->nvdimm_bus))
2382 return 0;
2383
2384 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2385 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
2386 if (!nfit) {
2387 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
2388 return -ENODEV;
2389 }
2390 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
2391 sysfs_put(nfit);
2392 if (!acpi_desc->scrub_count_state) {
2393 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
2394 return -ENODEV;
2395 }
2396
2397 return 0;
2398}
2399
2400static void acpi_nfit_destruct(void *data)
2401{
2402 struct acpi_nfit_desc *acpi_desc = data;
2403 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2404
2405 /*
2406 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
2407 * race teardown
2408 */
2409 mutex_lock(&acpi_desc_lock);
2410 acpi_desc->cancel = 1;
2411 /*
2412 * Bounce the nvdimm bus lock to make sure any in-flight
2413 * acpi_nfit_ars_rescan() submissions have had a chance to
2414 * either submit or see ->cancel set.
2415 */
2416 device_lock(bus_dev);
2417 device_unlock(bus_dev);
2418
2419 flush_workqueue(nfit_wq);
2420 if (acpi_desc->scrub_count_state)
2421 sysfs_put(acpi_desc->scrub_count_state);
2422 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2423 acpi_desc->nvdimm_bus = NULL;
2424 list_del(&acpi_desc->list);
2425 mutex_unlock(&acpi_desc_lock);
2426}
2427
2428int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
2380{ 2429{
2381 struct device *dev = acpi_desc->dev; 2430 struct device *dev = acpi_desc->dev;
2382 struct nfit_table_prev prev; 2431 struct nfit_table_prev prev;
2383 const void *end; 2432 const void *end;
2384 u8 *data;
2385 int rc; 2433 int rc;
2386 2434
2435 if (!acpi_desc->nvdimm_bus) {
2436 acpi_nfit_init_dsms(acpi_desc);
2437
2438 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
2439 &acpi_desc->nd_desc);
2440 if (!acpi_desc->nvdimm_bus)
2441 return -ENOMEM;
2442
2443 rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
2444 acpi_desc);
2445 if (rc)
2446 return rc;
2447
2448 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
2449 if (rc)
2450 return rc;
2451
2452 /* register this acpi_desc for mce notifications */
2453 mutex_lock(&acpi_desc_lock);
2454 list_add_tail(&acpi_desc->list, &acpi_descs);
2455 mutex_unlock(&acpi_desc_lock);
2456 }
2457
2387 mutex_lock(&acpi_desc->init_mutex); 2458 mutex_lock(&acpi_desc->init_mutex);
2388 2459
2389 INIT_LIST_HEAD(&prev.spas); 2460 INIT_LIST_HEAD(&prev.spas);
@@ -2406,7 +2477,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
2406 list_cut_position(&prev.flushes, &acpi_desc->flushes, 2477 list_cut_position(&prev.flushes, &acpi_desc->flushes,
2407 acpi_desc->flushes.prev); 2478 acpi_desc->flushes.prev);
2408 2479
2409 data = (u8 *) acpi_desc->nfit;
2410 end = data + sz; 2480 end = data + sz;
2411 while (!IS_ERR_OR_NULL(data)) 2481 while (!IS_ERR_OR_NULL(data))
2412 data = add_table(acpi_desc, &prev, data, end); 2482 data = add_table(acpi_desc, &prev, data, end);
@@ -2422,12 +2492,9 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
2422 if (rc) 2492 if (rc)
2423 goto out_unlock; 2493 goto out_unlock;
2424 2494
2425 if (nfit_mem_init(acpi_desc) != 0) { 2495 rc = nfit_mem_init(acpi_desc);
2426 rc = -ENOMEM; 2496 if (rc)
2427 goto out_unlock; 2497 goto out_unlock;
2428 }
2429
2430 acpi_nfit_init_dsms(acpi_desc);
2431 2498
2432 rc = acpi_nfit_register_dimms(acpi_desc); 2499 rc = acpi_nfit_register_dimms(acpi_desc);
2433 if (rc) 2500 if (rc)
@@ -2496,6 +2563,33 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2496 return 0; 2563 return 0;
2497} 2564}
2498 2565
2566int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
2567{
2568 struct device *dev = acpi_desc->dev;
2569 struct nfit_spa *nfit_spa;
2570
2571 if (work_busy(&acpi_desc->work))
2572 return -EBUSY;
2573
2574 if (acpi_desc->cancel)
2575 return 0;
2576
2577 mutex_lock(&acpi_desc->init_mutex);
2578 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2579 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2580
2581 if (nfit_spa_type(spa) != NFIT_SPA_PM)
2582 continue;
2583
2584 nfit_spa->ars_required = 1;
2585 }
2586 queue_work(nfit_wq, &acpi_desc->work);
2587 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
2588 mutex_unlock(&acpi_desc->init_mutex);
2589
2590 return 0;
2591}
2592
2499void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) 2593void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2500{ 2594{
2501 struct nvdimm_bus_descriptor *nd_desc; 2595 struct nvdimm_bus_descriptor *nd_desc;
@@ -2505,12 +2599,12 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2505 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 2599 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
2506 nd_desc = &acpi_desc->nd_desc; 2600 nd_desc = &acpi_desc->nd_desc;
2507 nd_desc->provider_name = "ACPI.NFIT"; 2601 nd_desc->provider_name = "ACPI.NFIT";
2602 nd_desc->module = THIS_MODULE;
2508 nd_desc->ndctl = acpi_nfit_ctl; 2603 nd_desc->ndctl = acpi_nfit_ctl;
2509 nd_desc->flush_probe = acpi_nfit_flush_probe; 2604 nd_desc->flush_probe = acpi_nfit_flush_probe;
2510 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 2605 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
2511 nd_desc->attr_groups = acpi_nfit_attribute_groups; 2606 nd_desc->attr_groups = acpi_nfit_attribute_groups;
2512 2607
2513 INIT_LIST_HEAD(&acpi_desc->spa_maps);
2514 INIT_LIST_HEAD(&acpi_desc->spas); 2608 INIT_LIST_HEAD(&acpi_desc->spas);
2515 INIT_LIST_HEAD(&acpi_desc->dcrs); 2609 INIT_LIST_HEAD(&acpi_desc->dcrs);
2516 INIT_LIST_HEAD(&acpi_desc->bdws); 2610 INIT_LIST_HEAD(&acpi_desc->bdws);
@@ -2518,7 +2612,7 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2518 INIT_LIST_HEAD(&acpi_desc->flushes); 2612 INIT_LIST_HEAD(&acpi_desc->flushes);
2519 INIT_LIST_HEAD(&acpi_desc->memdevs); 2613 INIT_LIST_HEAD(&acpi_desc->memdevs);
2520 INIT_LIST_HEAD(&acpi_desc->dimms); 2614 INIT_LIST_HEAD(&acpi_desc->dimms);
2521 mutex_init(&acpi_desc->spa_map_mutex); 2615 INIT_LIST_HEAD(&acpi_desc->list);
2522 mutex_init(&acpi_desc->init_mutex); 2616 mutex_init(&acpi_desc->init_mutex);
2523 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); 2617 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
2524} 2618}
@@ -2532,7 +2626,7 @@ static int acpi_nfit_add(struct acpi_device *adev)
2532 struct acpi_table_header *tbl; 2626 struct acpi_table_header *tbl;
2533 acpi_status status = AE_OK; 2627 acpi_status status = AE_OK;
2534 acpi_size sz; 2628 acpi_size sz;
2535 int rc; 2629 int rc = 0;
2536 2630
2537 status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); 2631 status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
2538 if (ACPI_FAILURE(status)) { 2632 if (ACPI_FAILURE(status)) {
@@ -2545,50 +2639,33 @@ static int acpi_nfit_add(struct acpi_device *adev)
2545 if (!acpi_desc) 2639 if (!acpi_desc)
2546 return -ENOMEM; 2640 return -ENOMEM;
2547 acpi_nfit_desc_init(acpi_desc, &adev->dev); 2641 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2548 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2549 if (!acpi_desc->nvdimm_bus)
2550 return -ENOMEM;
2551 2642
2552 /* 2643 /* Save the acpi header for exporting the revision via sysfs */
2553 * Save the acpi header for later and then skip it,
2554 * making nfit point to the first nfit table header.
2555 */
2556 acpi_desc->acpi_header = *tbl; 2644 acpi_desc->acpi_header = *tbl;
2557 acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
2558 sz -= sizeof(struct acpi_table_nfit);
2559 2645
2560 /* Evaluate _FIT and override with that if present */ 2646 /* Evaluate _FIT and override with that if present */
2561 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 2647 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2562 if (ACPI_SUCCESS(status) && buf.length > 0) { 2648 if (ACPI_SUCCESS(status) && buf.length > 0) {
2563 union acpi_object *obj; 2649 union acpi_object *obj = buf.pointer;
2564 /* 2650
2565 * Adjust for the acpi_object header of the _FIT 2651 if (obj->type == ACPI_TYPE_BUFFER)
2566 */ 2652 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
2567 obj = buf.pointer; 2653 obj->buffer.length);
2568 if (obj->type == ACPI_TYPE_BUFFER) { 2654 else
2569 acpi_desc->nfit =
2570 (struct acpi_nfit_header *)obj->buffer.pointer;
2571 sz = obj->buffer.length;
2572 } else
2573 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", 2655 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
2574 __func__, (int) obj->type); 2656 __func__, (int) obj->type);
2575 } 2657 kfree(buf.pointer);
2576 2658 } else
2577 rc = acpi_nfit_init(acpi_desc, sz); 2659 /* skip over the lead-in header table */
2578 if (rc) { 2660 rc = acpi_nfit_init(acpi_desc, (void *) tbl
2579 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 2661 + sizeof(struct acpi_table_nfit),
2580 return rc; 2662 sz - sizeof(struct acpi_table_nfit));
2581 } 2663 return rc;
2582 return 0;
2583} 2664}
2584 2665
2585static int acpi_nfit_remove(struct acpi_device *adev) 2666static int acpi_nfit_remove(struct acpi_device *adev)
2586{ 2667{
2587 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 2668 /* see acpi_nfit_destruct */
2588
2589 acpi_desc->cancel = 1;
2590 flush_workqueue(nfit_wq);
2591 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2592 return 0; 2669 return 0;
2593} 2670}
2594 2671
@@ -2596,9 +2673,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2596{ 2673{
2597 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 2674 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
2598 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 2675 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
2599 struct acpi_nfit_header *nfit_saved;
2600 union acpi_object *obj;
2601 struct device *dev = &adev->dev; 2676 struct device *dev = &adev->dev;
2677 union acpi_object *obj;
2602 acpi_status status; 2678 acpi_status status;
2603 int ret; 2679 int ret;
2604 2680
@@ -2616,9 +2692,6 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2616 if (!acpi_desc) 2692 if (!acpi_desc)
2617 goto out_unlock; 2693 goto out_unlock;
2618 acpi_nfit_desc_init(acpi_desc, &adev->dev); 2694 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2619 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2620 if (!acpi_desc->nvdimm_bus)
2621 goto out_unlock;
2622 } else { 2695 } else {
2623 /* 2696 /*
2624 * Finish previous registration before considering new 2697 * Finish previous registration before considering new
@@ -2634,21 +2707,14 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2634 goto out_unlock; 2707 goto out_unlock;
2635 } 2708 }
2636 2709
2637 nfit_saved = acpi_desc->nfit;
2638 obj = buf.pointer; 2710 obj = buf.pointer;
2639 if (obj->type == ACPI_TYPE_BUFFER) { 2711 if (obj->type == ACPI_TYPE_BUFFER) {
2640 acpi_desc->nfit = 2712 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
2641 (struct acpi_nfit_header *)obj->buffer.pointer; 2713 obj->buffer.length);
2642 ret = acpi_nfit_init(acpi_desc, obj->buffer.length); 2714 if (ret)
2643 if (ret) {
2644 /* Merge failed, restore old nfit, and exit */
2645 acpi_desc->nfit = nfit_saved;
2646 dev_err(dev, "failed to merge updated NFIT\n"); 2715 dev_err(dev, "failed to merge updated NFIT\n");
2647 } 2716 } else
2648 } else {
2649 /* Bad _FIT, restore old nfit */
2650 dev_err(dev, "Invalid _FIT\n"); 2717 dev_err(dev, "Invalid _FIT\n");
2651 }
2652 kfree(buf.pointer); 2718 kfree(buf.pointer);
2653 2719
2654 out_unlock: 2720 out_unlock:
@@ -2693,18 +2759,23 @@ static __init int nfit_init(void)
2693 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 2759 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
2694 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); 2760 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
2695 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); 2761 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
2762 acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
2696 2763
2697 nfit_wq = create_singlethread_workqueue("nfit"); 2764 nfit_wq = create_singlethread_workqueue("nfit");
2698 if (!nfit_wq) 2765 if (!nfit_wq)
2699 return -ENOMEM; 2766 return -ENOMEM;
2700 2767
2768 nfit_mce_register();
2769
2701 return acpi_bus_register_driver(&acpi_nfit_driver); 2770 return acpi_bus_register_driver(&acpi_nfit_driver);
2702} 2771}
2703 2772
2704static __exit void nfit_exit(void) 2773static __exit void nfit_exit(void)
2705{ 2774{
2775 nfit_mce_unregister();
2706 acpi_bus_unregister_driver(&acpi_nfit_driver); 2776 acpi_bus_unregister_driver(&acpi_nfit_driver);
2707 destroy_workqueue(nfit_wq); 2777 destroy_workqueue(nfit_wq);
2778 WARN_ON(!list_empty(&acpi_descs));
2708} 2779}
2709 2780
2710module_init(nfit_init); 2781module_init(nfit_init);
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
new file mode 100644
index 000000000000..4c745bf389fe
--- /dev/null
+++ b/drivers/acpi/nfit/mce.c
@@ -0,0 +1,89 @@
1/*
2 * NFIT - Machine Check Handler
3 *
4 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15#include <linux/notifier.h>
16#include <linux/acpi.h>
17#include <asm/mce.h>
18#include "nfit.h"
19
20static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
21 void *data)
22{
23 struct mce *mce = (struct mce *)data;
24 struct acpi_nfit_desc *acpi_desc;
25 struct nfit_spa *nfit_spa;
26
27 /* We only care about memory errors */
28 if (!(mce->status & MCACOD))
29 return NOTIFY_DONE;
30
31 /*
32 * mce->addr contains the physical addr accessed that caused the
33 * machine check. We need to walk through the list of NFITs, and see
34 * if any of them matches that address, and only then start a scrub.
35 */
36 mutex_lock(&acpi_desc_lock);
37 list_for_each_entry(acpi_desc, &acpi_descs, list) {
38 struct device *dev = acpi_desc->dev;
39 int found_match = 0;
40
41 mutex_lock(&acpi_desc->init_mutex);
42 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
43 struct acpi_nfit_system_address *spa = nfit_spa->spa;
44
45 if (nfit_spa_type(spa) == NFIT_SPA_PM)
46 continue;
47 /* find the spa that covers the mce addr */
48 if (spa->address > mce->addr)
49 continue;
50 if ((spa->address + spa->length - 1) < mce->addr)
51 continue;
52 found_match = 1;
53 dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n",
54 __func__, spa->range_index, spa->address,
55 spa->length);
56 /*
57 * We can break at the first match because we're going
58 * to rescan all the SPA ranges. There shouldn't be any
59 * aliasing anyway.
60 */
61 break;
62 }
63 mutex_unlock(&acpi_desc->init_mutex);
64
65 /*
66 * We can ignore an -EBUSY here because if an ARS is already
67 * in progress, just let that be the last authoritative one
68 */
69 if (found_match)
70 acpi_nfit_ars_rescan(acpi_desc);
71 }
72
73 mutex_unlock(&acpi_desc_lock);
74 return NOTIFY_DONE;
75}
76
77static struct notifier_block nfit_mce_dec = {
78 .notifier_call = nfit_handle_mce,
79};
80
81void nfit_mce_register(void)
82{
83 mce_register_decode_chain(&nfit_mce_dec);
84}
85
86void nfit_mce_unregister(void)
87{
88 mce_unregister_decode_chain(&nfit_mce_dec);
89}
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit/nfit.h
index 02b9ea1e8d2e..e894ded24d99 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -16,6 +16,7 @@
16#define __NFIT_H__ 16#define __NFIT_H__
17#include <linux/workqueue.h> 17#include <linux/workqueue.h>
18#include <linux/libnvdimm.h> 18#include <linux/libnvdimm.h>
19#include <linux/ndctl.h>
19#include <linux/types.h> 20#include <linux/types.h>
20#include <linux/uuid.h> 21#include <linux/uuid.h>
21#include <linux/acpi.h> 22#include <linux/acpi.h>
@@ -31,6 +32,9 @@
31#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" 32#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
32#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e" 33#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e"
33 34
35/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
36#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
37
34#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ 38#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
35 | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ 39 | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
36 | ACPI_NFIT_MEM_NOT_ARMED) 40 | ACPI_NFIT_MEM_NOT_ARMED)
@@ -40,6 +44,7 @@ enum nfit_uuids {
40 NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL, 44 NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL,
41 NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, 45 NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
42 NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, 46 NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
47 NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
43 NFIT_SPA_VOLATILE, 48 NFIT_SPA_VOLATILE,
44 NFIT_SPA_PM, 49 NFIT_SPA_PM,
45 NFIT_SPA_DCR, 50 NFIT_SPA_DCR,
@@ -74,37 +79,37 @@ enum {
74}; 79};
75 80
76struct nfit_spa { 81struct nfit_spa {
77 struct acpi_nfit_system_address *spa;
78 struct list_head list; 82 struct list_head list;
79 struct nd_region *nd_region; 83 struct nd_region *nd_region;
80 unsigned int ars_done:1; 84 unsigned int ars_required:1;
81 u32 clear_err_unit; 85 u32 clear_err_unit;
82 u32 max_ars; 86 u32 max_ars;
87 struct acpi_nfit_system_address spa[0];
83}; 88};
84 89
85struct nfit_dcr { 90struct nfit_dcr {
86 struct acpi_nfit_control_region *dcr;
87 struct list_head list; 91 struct list_head list;
92 struct acpi_nfit_control_region dcr[0];
88}; 93};
89 94
90struct nfit_bdw { 95struct nfit_bdw {
91 struct acpi_nfit_data_region *bdw;
92 struct list_head list; 96 struct list_head list;
97 struct acpi_nfit_data_region bdw[0];
93}; 98};
94 99
95struct nfit_idt { 100struct nfit_idt {
96 struct acpi_nfit_interleave *idt;
97 struct list_head list; 101 struct list_head list;
102 struct acpi_nfit_interleave idt[0];
98}; 103};
99 104
100struct nfit_flush { 105struct nfit_flush {
101 struct acpi_nfit_flush_address *flush;
102 struct list_head list; 106 struct list_head list;
107 struct acpi_nfit_flush_address flush[0];
103}; 108};
104 109
105struct nfit_memdev { 110struct nfit_memdev {
106 struct acpi_nfit_memory_map *memdev;
107 struct list_head list; 111 struct list_head list;
112 struct acpi_nfit_memory_map memdev[0];
108}; 113};
109 114
110/* assembled tables for a given dimm/memory-device */ 115/* assembled tables for a given dimm/memory-device */
@@ -123,6 +128,7 @@ struct nfit_mem {
123 struct list_head list; 128 struct list_head list;
124 struct acpi_device *adev; 129 struct acpi_device *adev;
125 struct acpi_nfit_desc *acpi_desc; 130 struct acpi_nfit_desc *acpi_desc;
131 struct resource *flush_wpq;
126 unsigned long dsm_mask; 132 unsigned long dsm_mask;
127 int family; 133 int family;
128}; 134};
@@ -130,10 +136,7 @@ struct nfit_mem {
130struct acpi_nfit_desc { 136struct acpi_nfit_desc {
131 struct nvdimm_bus_descriptor nd_desc; 137 struct nvdimm_bus_descriptor nd_desc;
132 struct acpi_table_header acpi_header; 138 struct acpi_table_header acpi_header;
133 struct acpi_nfit_header *nfit;
134 struct mutex spa_map_mutex;
135 struct mutex init_mutex; 139 struct mutex init_mutex;
136 struct list_head spa_maps;
137 struct list_head memdevs; 140 struct list_head memdevs;
138 struct list_head flushes; 141 struct list_head flushes;
139 struct list_head dimms; 142 struct list_head dimms;
@@ -146,6 +149,9 @@ struct acpi_nfit_desc {
146 struct nd_cmd_ars_status *ars_status; 149 struct nd_cmd_ars_status *ars_status;
147 size_t ars_status_size; 150 size_t ars_status_size;
148 struct work_struct work; 151 struct work_struct work;
152 struct list_head list;
153 struct kernfs_node *scrub_count_state;
154 unsigned int scrub_count;
149 unsigned int cancel:1; 155 unsigned int cancel:1;
150 unsigned long dimm_cmd_force_en; 156 unsigned long dimm_cmd_force_en;
151 unsigned long bus_cmd_force_en; 157 unsigned long bus_cmd_force_en;
@@ -161,7 +167,7 @@ enum nd_blk_mmio_selector {
161struct nd_blk_addr { 167struct nd_blk_addr {
162 union { 168 union {
163 void __iomem *base; 169 void __iomem *base;
164 void __pmem *aperture; 170 void *aperture;
165 }; 171 };
166}; 172};
167 173
@@ -180,28 +186,26 @@ struct nfit_blk {
180 u64 bdw_offset; /* post interleave offset */ 186 u64 bdw_offset; /* post interleave offset */
181 u64 stat_offset; 187 u64 stat_offset;
182 u64 cmd_offset; 188 u64 cmd_offset;
183 void __iomem *nvdimm_flush;
184 u32 dimm_flags; 189 u32 dimm_flags;
185}; 190};
186 191
187enum spa_map_type { 192extern struct list_head acpi_descs;
188 SPA_MAP_CONTROL, 193extern struct mutex acpi_desc_lock;
189 SPA_MAP_APERTURE, 194int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc);
190};
191
192struct nfit_spa_mapping {
193 struct acpi_nfit_desc *acpi_desc;
194 struct acpi_nfit_system_address *spa;
195 struct list_head list;
196 struct kref kref;
197 enum spa_map_type type;
198 struct nd_blk_addr addr;
199};
200 195
201static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref) 196#ifdef CONFIG_X86_MCE
197void nfit_mce_register(void);
198void nfit_mce_unregister(void);
199#else
200static inline void nfit_mce_register(void)
202{ 201{
203 return container_of(kref, struct nfit_spa_mapping, kref);
204} 202}
203static inline void nfit_mce_unregister(void)
204{
205}
206#endif
207
208int nfit_spa_type(struct acpi_nfit_system_address *spa);
205 209
206static inline struct acpi_nfit_memory_map *__to_nfit_memdev( 210static inline struct acpi_nfit_memory_map *__to_nfit_memdev(
207 struct nfit_mem *nfit_mem) 211 struct nfit_mem *nfit_mem)
@@ -218,6 +222,6 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
218} 222}
219 223
220const u8 *to_nfit_uuid(enum nfit_uuids id); 224const u8 *to_nfit_uuid(enum nfit_uuids id);
221int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz); 225int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
222void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); 226void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
223#endif /* __NFIT_H__ */ 227#endif /* __NFIT_H__ */