diff options
| author | Dan Williams <dan.j.williams@intel.com> | 2016-10-07 19:46:24 -0400 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2016-10-07 19:46:24 -0400 |
| commit | 178d6f4be8bf42b298bedf8ea2a00754100e0c4e (patch) | |
| tree | a71865455adc31082a4ad21a942286520a7b5da1 | |
| parent | db58028ee4e360430de8e3b48f657dc798ee6591 (diff) | |
| parent | 98a29c39dc689298d2f834f40102cba752eb49c0 (diff) | |
Merge branch 'for-4.9/libnvdimm' into libnvdimm-for-next
| -rw-r--r-- | drivers/acpi/nfit/core.c | 210 | ||||
| -rw-r--r-- | drivers/acpi/nfit/mce.c | 24 | ||||
| -rw-r--r-- | drivers/acpi/nfit/nfit.h | 17 | ||||
| -rw-r--r-- | drivers/nvdimm/bus.c | 2 | ||||
| -rw-r--r-- | drivers/nvdimm/core.c | 73 | ||||
| -rw-r--r-- | drivers/nvdimm/dimm.c | 11 | ||||
| -rw-r--r-- | drivers/nvdimm/dimm_devs.c | 226 | ||||
| -rw-r--r-- | drivers/nvdimm/label.c | 192 | ||||
| -rw-r--r-- | drivers/nvdimm/namespace_devs.c | 792 | ||||
| -rw-r--r-- | drivers/nvdimm/nd-core.h | 24 | ||||
| -rw-r--r-- | drivers/nvdimm/nd.h | 29 | ||||
| -rw-r--r-- | drivers/nvdimm/pmem.c | 28 | ||||
| -rw-r--r-- | drivers/nvdimm/region_devs.c | 58 | ||||
| -rw-r--r-- | include/linux/libnvdimm.h | 28 | ||||
| -rw-r--r-- | include/linux/nd.h | 8 | ||||
| -rw-r--r-- | include/uapi/linux/ndctl.h | 30 | ||||
| -rw-r--r-- | tools/testing/nvdimm/Kbuild | 1 | ||||
| -rw-r--r-- | tools/testing/nvdimm/test/iomap.c | 151 | ||||
| -rw-r--r-- | tools/testing/nvdimm/test/nfit.c | 160 | ||||
| -rw-r--r-- | tools/testing/nvdimm/test/nfit_test.h | 12 |
20 files changed, 1559 insertions(+), 517 deletions(-)
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index e1d5ea6d5e40..71a7d07c28c9 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
| @@ -886,6 +886,58 @@ static ssize_t revision_show(struct device *dev, | |||
| 886 | } | 886 | } |
| 887 | static DEVICE_ATTR_RO(revision); | 887 | static DEVICE_ATTR_RO(revision); |
| 888 | 888 | ||
| 889 | static ssize_t hw_error_scrub_show(struct device *dev, | ||
| 890 | struct device_attribute *attr, char *buf) | ||
| 891 | { | ||
| 892 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); | ||
| 893 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); | ||
| 894 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | ||
| 895 | |||
| 896 | return sprintf(buf, "%d\n", acpi_desc->scrub_mode); | ||
| 897 | } | ||
| 898 | |||
| 899 | /* | ||
| 900 | * The 'hw_error_scrub' attribute can have the following values written to it: | ||
| 901 | * '0': Switch to the default mode where an exception will only insert | ||
| 902 | * the address of the memory error into the poison and badblocks lists. | ||
| 903 | * '1': Enable a full scrub to happen if an exception for a memory error is | ||
| 904 | * received. | ||
| 905 | */ | ||
| 906 | static ssize_t hw_error_scrub_store(struct device *dev, | ||
| 907 | struct device_attribute *attr, const char *buf, size_t size) | ||
| 908 | { | ||
| 909 | struct nvdimm_bus_descriptor *nd_desc; | ||
| 910 | ssize_t rc; | ||
| 911 | long val; | ||
| 912 | |||
| 913 | rc = kstrtol(buf, 0, &val); | ||
| 914 | if (rc) | ||
| 915 | return rc; | ||
| 916 | |||
| 917 | device_lock(dev); | ||
| 918 | nd_desc = dev_get_drvdata(dev); | ||
| 919 | if (nd_desc) { | ||
| 920 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | ||
| 921 | |||
| 922 | switch (val) { | ||
| 923 | case HW_ERROR_SCRUB_ON: | ||
| 924 | acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; | ||
| 925 | break; | ||
| 926 | case HW_ERROR_SCRUB_OFF: | ||
| 927 | acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; | ||
| 928 | break; | ||
| 929 | default: | ||
| 930 | rc = -EINVAL; | ||
| 931 | break; | ||
| 932 | } | ||
| 933 | } | ||
| 934 | device_unlock(dev); | ||
| 935 | if (rc) | ||
| 936 | return rc; | ||
| 937 | return size; | ||
| 938 | } | ||
| 939 | static DEVICE_ATTR_RW(hw_error_scrub); | ||
| 940 | |||
| 889 | /* | 941 | /* |
| 890 | * This shows the number of full Address Range Scrubs that have been | 942 | * This shows the number of full Address Range Scrubs that have been |
| 891 | * completed since driver load time. Userspace can wait on this using | 943 | * completed since driver load time. Userspace can wait on this using |
| @@ -958,6 +1010,7 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) | |||
| 958 | static struct attribute *acpi_nfit_attributes[] = { | 1010 | static struct attribute *acpi_nfit_attributes[] = { |
| 959 | &dev_attr_revision.attr, | 1011 | &dev_attr_revision.attr, |
| 960 | &dev_attr_scrub.attr, | 1012 | &dev_attr_scrub.attr, |
| 1013 | &dev_attr_hw_error_scrub.attr, | ||
| 961 | NULL, | 1014 | NULL, |
| 962 | }; | 1015 | }; |
| 963 | 1016 | ||
| @@ -1256,6 +1309,44 @@ static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, | |||
| 1256 | return NULL; | 1309 | return NULL; |
| 1257 | } | 1310 | } |
| 1258 | 1311 | ||
| 1312 | void __acpi_nvdimm_notify(struct device *dev, u32 event) | ||
| 1313 | { | ||
| 1314 | struct nfit_mem *nfit_mem; | ||
| 1315 | struct acpi_nfit_desc *acpi_desc; | ||
| 1316 | |||
| 1317 | dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__, | ||
| 1318 | event); | ||
| 1319 | |||
| 1320 | if (event != NFIT_NOTIFY_DIMM_HEALTH) { | ||
| 1321 | dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), | ||
| 1322 | event); | ||
| 1323 | return; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | acpi_desc = dev_get_drvdata(dev->parent); | ||
| 1327 | if (!acpi_desc) | ||
| 1328 | return; | ||
| 1329 | |||
| 1330 | /* | ||
| 1331 | * If we successfully retrieved acpi_desc, then we know nfit_mem data | ||
| 1332 | * is still valid. | ||
| 1333 | */ | ||
| 1334 | nfit_mem = dev_get_drvdata(dev); | ||
| 1335 | if (nfit_mem && nfit_mem->flags_attr) | ||
| 1336 | sysfs_notify_dirent(nfit_mem->flags_attr); | ||
| 1337 | } | ||
| 1338 | EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); | ||
| 1339 | |||
| 1340 | static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) | ||
| 1341 | { | ||
| 1342 | struct acpi_device *adev = data; | ||
| 1343 | struct device *dev = &adev->dev; | ||
| 1344 | |||
| 1345 | device_lock(dev->parent); | ||
| 1346 | __acpi_nvdimm_notify(dev, event); | ||
| 1347 | device_unlock(dev->parent); | ||
| 1348 | } | ||
| 1349 | |||
| 1259 | static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | 1350 | static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, |
| 1260 | struct nfit_mem *nfit_mem, u32 device_handle) | 1351 | struct nfit_mem *nfit_mem, u32 device_handle) |
| 1261 | { | 1352 | { |
| @@ -1280,6 +1371,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | |||
| 1280 | return force_enable_dimms ? 0 : -ENODEV; | 1371 | return force_enable_dimms ? 0 : -ENODEV; |
| 1281 | } | 1372 | } |
| 1282 | 1373 | ||
| 1374 | if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, | ||
| 1375 | ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { | ||
| 1376 | dev_err(dev, "%s: notification registration failed\n", | ||
| 1377 | dev_name(&adev_dimm->dev)); | ||
| 1378 | return -ENXIO; | ||
| 1379 | } | ||
| 1380 | |||
| 1283 | /* | 1381 | /* |
| 1284 | * Until standardization materializes we need to consider 4 | 1382 | * Until standardization materializes we need to consider 4 |
| 1285 | * different command sets. Note, that checking for function0 (bit0) | 1383 | * different command sets. Note, that checking for function0 (bit0) |
| @@ -1318,18 +1416,41 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | |||
| 1318 | return 0; | 1416 | return 0; |
| 1319 | } | 1417 | } |
| 1320 | 1418 | ||
| 1419 | static void shutdown_dimm_notify(void *data) | ||
| 1420 | { | ||
| 1421 | struct acpi_nfit_desc *acpi_desc = data; | ||
| 1422 | struct nfit_mem *nfit_mem; | ||
| 1423 | |||
| 1424 | mutex_lock(&acpi_desc->init_mutex); | ||
| 1425 | /* | ||
| 1426 | * Clear out the nfit_mem->flags_attr and shut down dimm event | ||
| 1427 | * notifications. | ||
| 1428 | */ | ||
| 1429 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | ||
| 1430 | struct acpi_device *adev_dimm = nfit_mem->adev; | ||
| 1431 | |||
| 1432 | if (nfit_mem->flags_attr) { | ||
| 1433 | sysfs_put(nfit_mem->flags_attr); | ||
| 1434 | nfit_mem->flags_attr = NULL; | ||
| 1435 | } | ||
| 1436 | if (adev_dimm) | ||
| 1437 | acpi_remove_notify_handler(adev_dimm->handle, | ||
| 1438 | ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); | ||
| 1439 | } | ||
| 1440 | mutex_unlock(&acpi_desc->init_mutex); | ||
| 1441 | } | ||
| 1442 | |||
| 1321 | static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) | 1443 | static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) |
| 1322 | { | 1444 | { |
| 1323 | struct nfit_mem *nfit_mem; | 1445 | struct nfit_mem *nfit_mem; |
| 1324 | int dimm_count = 0; | 1446 | int dimm_count = 0, rc; |
| 1447 | struct nvdimm *nvdimm; | ||
| 1325 | 1448 | ||
| 1326 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | 1449 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
| 1327 | struct acpi_nfit_flush_address *flush; | 1450 | struct acpi_nfit_flush_address *flush; |
| 1328 | unsigned long flags = 0, cmd_mask; | 1451 | unsigned long flags = 0, cmd_mask; |
| 1329 | struct nvdimm *nvdimm; | ||
| 1330 | u32 device_handle; | 1452 | u32 device_handle; |
| 1331 | u16 mem_flags; | 1453 | u16 mem_flags; |
| 1332 | int rc; | ||
| 1333 | 1454 | ||
| 1334 | device_handle = __to_nfit_memdev(nfit_mem)->device_handle; | 1455 | device_handle = __to_nfit_memdev(nfit_mem)->device_handle; |
| 1335 | nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); | 1456 | nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); |
| @@ -1382,7 +1503,30 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) | |||
| 1382 | 1503 | ||
| 1383 | } | 1504 | } |
| 1384 | 1505 | ||
| 1385 | return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); | 1506 | rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); |
| 1507 | if (rc) | ||
| 1508 | return rc; | ||
| 1509 | |||
| 1510 | /* | ||
| 1511 | * Now that dimms are successfully registered, and async registration | ||
| 1512 | * is flushed, attempt to enable event notification. | ||
| 1513 | */ | ||
| 1514 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | ||
| 1515 | struct kernfs_node *nfit_kernfs; | ||
| 1516 | |||
| 1517 | nvdimm = nfit_mem->nvdimm; | ||
| 1518 | nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); | ||
| 1519 | if (nfit_kernfs) | ||
| 1520 | nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, | ||
| 1521 | "flags"); | ||
| 1522 | sysfs_put(nfit_kernfs); | ||
| 1523 | if (!nfit_mem->flags_attr) | ||
| 1524 | dev_warn(acpi_desc->dev, "%s: notifications disabled\n", | ||
| 1525 | nvdimm_name(nvdimm)); | ||
| 1526 | } | ||
| 1527 | |||
| 1528 | return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, | ||
| 1529 | acpi_desc); | ||
| 1386 | } | 1530 | } |
| 1387 | 1531 | ||
| 1388 | static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) | 1532 | static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) |
| @@ -1491,9 +1635,9 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, | |||
| 1491 | if (!info) | 1635 | if (!info) |
| 1492 | return -ENOMEM; | 1636 | return -ENOMEM; |
| 1493 | for (i = 0; i < nr; i++) { | 1637 | for (i = 0; i < nr; i++) { |
| 1494 | struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; | 1638 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; |
| 1495 | struct nfit_set_info_map *map = &info->mapping[i]; | 1639 | struct nfit_set_info_map *map = &info->mapping[i]; |
| 1496 | struct nvdimm *nvdimm = nd_mapping->nvdimm; | 1640 | struct nvdimm *nvdimm = mapping->nvdimm; |
| 1497 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 1641 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 1498 | struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, | 1642 | struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, |
| 1499 | spa->range_index, i); | 1643 | spa->range_index, i); |
| @@ -1917,7 +2061,7 @@ static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, | |||
| 1917 | } | 2061 | } |
| 1918 | 2062 | ||
| 1919 | static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, | 2063 | static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, |
| 1920 | struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, | 2064 | struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, |
| 1921 | struct acpi_nfit_memory_map *memdev, | 2065 | struct acpi_nfit_memory_map *memdev, |
| 1922 | struct nfit_spa *nfit_spa) | 2066 | struct nfit_spa *nfit_spa) |
| 1923 | { | 2067 | { |
| @@ -1934,12 +2078,12 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, | |||
| 1934 | return -ENODEV; | 2078 | return -ENODEV; |
| 1935 | } | 2079 | } |
| 1936 | 2080 | ||
| 1937 | nd_mapping->nvdimm = nvdimm; | 2081 | mapping->nvdimm = nvdimm; |
| 1938 | switch (nfit_spa_type(spa)) { | 2082 | switch (nfit_spa_type(spa)) { |
| 1939 | case NFIT_SPA_PM: | 2083 | case NFIT_SPA_PM: |
| 1940 | case NFIT_SPA_VOLATILE: | 2084 | case NFIT_SPA_VOLATILE: |
| 1941 | nd_mapping->start = memdev->address; | 2085 | mapping->start = memdev->address; |
| 1942 | nd_mapping->size = memdev->region_size; | 2086 | mapping->size = memdev->region_size; |
| 1943 | break; | 2087 | break; |
| 1944 | case NFIT_SPA_DCR: | 2088 | case NFIT_SPA_DCR: |
| 1945 | nfit_mem = nvdimm_provider_data(nvdimm); | 2089 | nfit_mem = nvdimm_provider_data(nvdimm); |
| @@ -1947,13 +2091,13 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, | |||
| 1947 | dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", | 2091 | dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", |
| 1948 | spa->range_index, nvdimm_name(nvdimm)); | 2092 | spa->range_index, nvdimm_name(nvdimm)); |
| 1949 | } else { | 2093 | } else { |
| 1950 | nd_mapping->size = nfit_mem->bdw->capacity; | 2094 | mapping->size = nfit_mem->bdw->capacity; |
| 1951 | nd_mapping->start = nfit_mem->bdw->start_address; | 2095 | mapping->start = nfit_mem->bdw->start_address; |
| 1952 | ndr_desc->num_lanes = nfit_mem->bdw->windows; | 2096 | ndr_desc->num_lanes = nfit_mem->bdw->windows; |
| 1953 | blk_valid = 1; | 2097 | blk_valid = 1; |
| 1954 | } | 2098 | } |
| 1955 | 2099 | ||
| 1956 | ndr_desc->nd_mapping = nd_mapping; | 2100 | ndr_desc->mapping = mapping; |
| 1957 | ndr_desc->num_mappings = blk_valid; | 2101 | ndr_desc->num_mappings = blk_valid; |
| 1958 | ndbr_desc = to_blk_region_desc(ndr_desc); | 2102 | ndbr_desc = to_blk_region_desc(ndr_desc); |
| 1959 | ndbr_desc->enable = acpi_nfit_blk_region_enable; | 2103 | ndbr_desc->enable = acpi_nfit_blk_region_enable; |
| @@ -1979,7 +2123,7 @@ static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) | |||
| 1979 | static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, | 2123 | static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, |
| 1980 | struct nfit_spa *nfit_spa) | 2124 | struct nfit_spa *nfit_spa) |
| 1981 | { | 2125 | { |
| 1982 | static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; | 2126 | static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; |
| 1983 | struct acpi_nfit_system_address *spa = nfit_spa->spa; | 2127 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
| 1984 | struct nd_blk_region_desc ndbr_desc; | 2128 | struct nd_blk_region_desc ndbr_desc; |
| 1985 | struct nd_region_desc *ndr_desc; | 2129 | struct nd_region_desc *ndr_desc; |
| @@ -1998,7 +2142,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, | |||
| 1998 | } | 2142 | } |
| 1999 | 2143 | ||
| 2000 | memset(&res, 0, sizeof(res)); | 2144 | memset(&res, 0, sizeof(res)); |
| 2001 | memset(&nd_mappings, 0, sizeof(nd_mappings)); | 2145 | memset(&mappings, 0, sizeof(mappings)); |
| 2002 | memset(&ndbr_desc, 0, sizeof(ndbr_desc)); | 2146 | memset(&ndbr_desc, 0, sizeof(ndbr_desc)); |
| 2003 | res.start = spa->address; | 2147 | res.start = spa->address; |
| 2004 | res.end = res.start + spa->length - 1; | 2148 | res.end = res.start + spa->length - 1; |
| @@ -2014,7 +2158,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, | |||
| 2014 | 2158 | ||
| 2015 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { | 2159 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
| 2016 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; | 2160 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
| 2017 | struct nd_mapping *nd_mapping; | 2161 | struct nd_mapping_desc *mapping; |
| 2018 | 2162 | ||
| 2019 | if (memdev->range_index != spa->range_index) | 2163 | if (memdev->range_index != spa->range_index) |
| 2020 | continue; | 2164 | continue; |
| @@ -2023,14 +2167,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, | |||
| 2023 | spa->range_index, ND_MAX_MAPPINGS); | 2167 | spa->range_index, ND_MAX_MAPPINGS); |
| 2024 | return -ENXIO; | 2168 | return -ENXIO; |
| 2025 | } | 2169 | } |
| 2026 | nd_mapping = &nd_mappings[count++]; | 2170 | mapping = &mappings[count++]; |
| 2027 | rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, | 2171 | rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, |
| 2028 | memdev, nfit_spa); | 2172 | memdev, nfit_spa); |
| 2029 | if (rc) | 2173 | if (rc) |
| 2030 | goto out; | 2174 | goto out; |
| 2031 | } | 2175 | } |
| 2032 | 2176 | ||
| 2033 | ndr_desc->nd_mapping = nd_mappings; | 2177 | ndr_desc->mapping = mappings; |
| 2034 | ndr_desc->num_mappings = count; | 2178 | ndr_desc->num_mappings = count; |
| 2035 | rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); | 2179 | rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); |
| 2036 | if (rc) | 2180 | if (rc) |
| @@ -2678,29 +2822,30 @@ static int acpi_nfit_remove(struct acpi_device *adev) | |||
| 2678 | return 0; | 2822 | return 0; |
| 2679 | } | 2823 | } |
| 2680 | 2824 | ||
| 2681 | static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | 2825 | void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) |
| 2682 | { | 2826 | { |
| 2683 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); | 2827 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); |
| 2684 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; | 2828 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 2685 | struct device *dev = &adev->dev; | ||
| 2686 | union acpi_object *obj; | 2829 | union acpi_object *obj; |
| 2687 | acpi_status status; | 2830 | acpi_status status; |
| 2688 | int ret; | 2831 | int ret; |
| 2689 | 2832 | ||
| 2690 | dev_dbg(dev, "%s: event: %d\n", __func__, event); | 2833 | dev_dbg(dev, "%s: event: %d\n", __func__, event); |
| 2691 | 2834 | ||
| 2692 | device_lock(dev); | 2835 | if (event != NFIT_NOTIFY_UPDATE) |
| 2836 | return; | ||
| 2837 | |||
| 2693 | if (!dev->driver) { | 2838 | if (!dev->driver) { |
| 2694 | /* dev->driver may be null if we're being removed */ | 2839 | /* dev->driver may be null if we're being removed */ |
| 2695 | dev_dbg(dev, "%s: no driver found for dev\n", __func__); | 2840 | dev_dbg(dev, "%s: no driver found for dev\n", __func__); |
| 2696 | goto out_unlock; | 2841 | return; |
| 2697 | } | 2842 | } |
| 2698 | 2843 | ||
| 2699 | if (!acpi_desc) { | 2844 | if (!acpi_desc) { |
| 2700 | acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); | 2845 | acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); |
| 2701 | if (!acpi_desc) | 2846 | if (!acpi_desc) |
| 2702 | goto out_unlock; | 2847 | return; |
| 2703 | acpi_nfit_desc_init(acpi_desc, &adev->dev); | 2848 | acpi_nfit_desc_init(acpi_desc, dev); |
| 2704 | } else { | 2849 | } else { |
| 2705 | /* | 2850 | /* |
| 2706 | * Finish previous registration before considering new | 2851 | * Finish previous registration before considering new |
| @@ -2710,10 +2855,10 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | |||
| 2710 | } | 2855 | } |
| 2711 | 2856 | ||
| 2712 | /* Evaluate _FIT */ | 2857 | /* Evaluate _FIT */ |
| 2713 | status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); | 2858 | status = acpi_evaluate_object(handle, "_FIT", NULL, &buf); |
| 2714 | if (ACPI_FAILURE(status)) { | 2859 | if (ACPI_FAILURE(status)) { |
| 2715 | dev_err(dev, "failed to evaluate _FIT\n"); | 2860 | dev_err(dev, "failed to evaluate _FIT\n"); |
| 2716 | goto out_unlock; | 2861 | return; |
| 2717 | } | 2862 | } |
| 2718 | 2863 | ||
| 2719 | obj = buf.pointer; | 2864 | obj = buf.pointer; |
| @@ -2725,9 +2870,14 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | |||
| 2725 | } else | 2870 | } else |
| 2726 | dev_err(dev, "Invalid _FIT\n"); | 2871 | dev_err(dev, "Invalid _FIT\n"); |
| 2727 | kfree(buf.pointer); | 2872 | kfree(buf.pointer); |
| 2873 | } | ||
| 2874 | EXPORT_SYMBOL_GPL(__acpi_nfit_notify); | ||
| 2728 | 2875 | ||
| 2729 | out_unlock: | 2876 | static void acpi_nfit_notify(struct acpi_device *adev, u32 event) |
| 2730 | device_unlock(dev); | 2877 | { |
| 2878 | device_lock(&adev->dev); | ||
| 2879 | __acpi_nfit_notify(&adev->dev, adev->handle, event); | ||
| 2880 | device_unlock(&adev->dev); | ||
| 2731 | } | 2881 | } |
| 2732 | 2882 | ||
| 2733 | static const struct acpi_device_id acpi_nfit_ids[] = { | 2883 | static const struct acpi_device_id acpi_nfit_ids[] = { |
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index 161f91539ae6..e5ce81c38eed 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | */ | 14 | */ |
| 15 | #include <linux/notifier.h> | 15 | #include <linux/notifier.h> |
| 16 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
| 17 | #include <linux/nd.h> | ||
| 17 | #include <asm/mce.h> | 18 | #include <asm/mce.h> |
| 18 | #include "nfit.h" | 19 | #include "nfit.h" |
| 19 | 20 | ||
| @@ -62,12 +63,25 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, | |||
| 62 | } | 63 | } |
| 63 | mutex_unlock(&acpi_desc->init_mutex); | 64 | mutex_unlock(&acpi_desc->init_mutex); |
| 64 | 65 | ||
| 65 | /* | 66 | if (!found_match) |
| 66 | * We can ignore an -EBUSY here because if an ARS is already | 67 | continue; |
| 67 | * in progress, just let that be the last authoritative one | 68 | |
| 68 | */ | 69 | /* If this fails due to an -ENOMEM, there is little we can do */ |
| 69 | if (found_match) | 70 | nvdimm_bus_add_poison(acpi_desc->nvdimm_bus, |
| 71 | ALIGN(mce->addr, L1_CACHE_BYTES), | ||
| 72 | L1_CACHE_BYTES); | ||
| 73 | nvdimm_region_notify(nfit_spa->nd_region, | ||
| 74 | NVDIMM_REVALIDATE_POISON); | ||
| 75 | |||
| 76 | if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) { | ||
| 77 | /* | ||
| 78 | * We can ignore an -EBUSY here because if an ARS is | ||
| 79 | * already in progress, just let that be the last | ||
| 80 | * authoritative one | ||
| 81 | */ | ||
| 70 | acpi_nfit_ars_rescan(acpi_desc); | 82 | acpi_nfit_ars_rescan(acpi_desc); |
| 83 | } | ||
| 84 | break; | ||
| 71 | } | 85 | } |
| 72 | 86 | ||
| 73 | mutex_unlock(&acpi_desc_lock); | 87 | mutex_unlock(&acpi_desc_lock); |
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index e894ded24d99..14296f5267c8 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h | |||
| @@ -78,6 +78,14 @@ enum { | |||
| 78 | NFIT_ARS_TIMEOUT = 90, | 78 | NFIT_ARS_TIMEOUT = 90, |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | enum nfit_root_notifiers { | ||
| 82 | NFIT_NOTIFY_UPDATE = 0x80, | ||
| 83 | }; | ||
| 84 | |||
| 85 | enum nfit_dimm_notifiers { | ||
| 86 | NFIT_NOTIFY_DIMM_HEALTH = 0x81, | ||
| 87 | }; | ||
| 88 | |||
| 81 | struct nfit_spa { | 89 | struct nfit_spa { |
| 82 | struct list_head list; | 90 | struct list_head list; |
| 83 | struct nd_region *nd_region; | 91 | struct nd_region *nd_region; |
| @@ -124,6 +132,7 @@ struct nfit_mem { | |||
| 124 | struct acpi_nfit_system_address *spa_bdw; | 132 | struct acpi_nfit_system_address *spa_bdw; |
| 125 | struct acpi_nfit_interleave *idt_dcr; | 133 | struct acpi_nfit_interleave *idt_dcr; |
| 126 | struct acpi_nfit_interleave *idt_bdw; | 134 | struct acpi_nfit_interleave *idt_bdw; |
| 135 | struct kernfs_node *flags_attr; | ||
| 127 | struct nfit_flush *nfit_flush; | 136 | struct nfit_flush *nfit_flush; |
| 128 | struct list_head list; | 137 | struct list_head list; |
| 129 | struct acpi_device *adev; | 138 | struct acpi_device *adev; |
| @@ -152,6 +161,7 @@ struct acpi_nfit_desc { | |||
| 152 | struct list_head list; | 161 | struct list_head list; |
| 153 | struct kernfs_node *scrub_count_state; | 162 | struct kernfs_node *scrub_count_state; |
| 154 | unsigned int scrub_count; | 163 | unsigned int scrub_count; |
| 164 | unsigned int scrub_mode; | ||
| 155 | unsigned int cancel:1; | 165 | unsigned int cancel:1; |
| 156 | unsigned long dimm_cmd_force_en; | 166 | unsigned long dimm_cmd_force_en; |
| 157 | unsigned long bus_cmd_force_en; | 167 | unsigned long bus_cmd_force_en; |
| @@ -159,6 +169,11 @@ struct acpi_nfit_desc { | |||
| 159 | void *iobuf, u64 len, int rw); | 169 | void *iobuf, u64 len, int rw); |
| 160 | }; | 170 | }; |
| 161 | 171 | ||
| 172 | enum scrub_mode { | ||
| 173 | HW_ERROR_SCRUB_OFF, | ||
| 174 | HW_ERROR_SCRUB_ON, | ||
| 175 | }; | ||
| 176 | |||
| 162 | enum nd_blk_mmio_selector { | 177 | enum nd_blk_mmio_selector { |
| 163 | BDW, | 178 | BDW, |
| 164 | DCR, | 179 | DCR, |
| @@ -223,5 +238,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc( | |||
| 223 | 238 | ||
| 224 | const u8 *to_nfit_uuid(enum nfit_uuids id); | 239 | const u8 *to_nfit_uuid(enum nfit_uuids id); |
| 225 | int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); | 240 | int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); |
| 241 | void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event); | ||
| 242 | void __acpi_nvdimm_notify(struct device *dev, u32 event); | ||
| 226 | void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); | 243 | void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); |
| 227 | #endif /* __NFIT_H__ */ | 244 | #endif /* __NFIT_H__ */ |
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 935866fe5ec2..a8b6949a8778 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c | |||
| @@ -217,6 +217,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, | |||
| 217 | return rc; | 217 | return rc; |
| 218 | if (cmd_rc < 0) | 218 | if (cmd_rc < 0) |
| 219 | return cmd_rc; | 219 | return cmd_rc; |
| 220 | |||
| 221 | nvdimm_clear_from_poison_list(nvdimm_bus, phys, len); | ||
| 220 | return clear_err.cleared; | 222 | return clear_err.cleared; |
| 221 | } | 223 | } |
| 222 | EXPORT_SYMBOL_GPL(nvdimm_clear_poison); | 224 | EXPORT_SYMBOL_GPL(nvdimm_clear_poison); |
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 4d7bbd2df5c0..7ceba08774b6 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c | |||
| @@ -547,11 +547,12 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region, | |||
| 547 | } | 547 | } |
| 548 | EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate); | 548 | EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate); |
| 549 | 549 | ||
| 550 | static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) | 550 | static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length, |
| 551 | gfp_t flags) | ||
| 551 | { | 552 | { |
| 552 | struct nd_poison *pl; | 553 | struct nd_poison *pl; |
| 553 | 554 | ||
| 554 | pl = kzalloc(sizeof(*pl), GFP_KERNEL); | 555 | pl = kzalloc(sizeof(*pl), flags); |
| 555 | if (!pl) | 556 | if (!pl) |
| 556 | return -ENOMEM; | 557 | return -ENOMEM; |
| 557 | 558 | ||
| @@ -567,7 +568,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) | |||
| 567 | struct nd_poison *pl; | 568 | struct nd_poison *pl; |
| 568 | 569 | ||
| 569 | if (list_empty(&nvdimm_bus->poison_list)) | 570 | if (list_empty(&nvdimm_bus->poison_list)) |
| 570 | return add_poison(nvdimm_bus, addr, length); | 571 | return add_poison(nvdimm_bus, addr, length, GFP_KERNEL); |
| 571 | 572 | ||
| 572 | /* | 573 | /* |
| 573 | * There is a chance this is a duplicate, check for those first. | 574 | * There is a chance this is a duplicate, check for those first. |
| @@ -587,7 +588,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) | |||
| 587 | * as any overlapping ranges will get resolved when the list is consumed | 588 | * as any overlapping ranges will get resolved when the list is consumed |
| 588 | * and converted to badblocks | 589 | * and converted to badblocks |
| 589 | */ | 590 | */ |
| 590 | return add_poison(nvdimm_bus, addr, length); | 591 | return add_poison(nvdimm_bus, addr, length, GFP_KERNEL); |
| 591 | } | 592 | } |
| 592 | 593 | ||
| 593 | int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) | 594 | int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) |
| @@ -602,6 +603,70 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) | |||
| 602 | } | 603 | } |
| 603 | EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison); | 604 | EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison); |
| 604 | 605 | ||
| 606 | void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus, | ||
| 607 | phys_addr_t start, unsigned int len) | ||
| 608 | { | ||
| 609 | struct list_head *poison_list = &nvdimm_bus->poison_list; | ||
| 610 | u64 clr_end = start + len - 1; | ||
| 611 | struct nd_poison *pl, *next; | ||
| 612 | |||
| 613 | nvdimm_bus_lock(&nvdimm_bus->dev); | ||
| 614 | WARN_ON_ONCE(list_empty(poison_list)); | ||
| 615 | |||
| 616 | /* | ||
| 617 | * [start, clr_end] is the poison interval being cleared. | ||
| 618 | * [pl->start, pl_end] is the poison_list entry we're comparing | ||
| 619 | * the above interval against. The poison list entry may need | ||
| 620 | * to be modified (update either start or length), deleted, or | ||
| 621 | * split into two based on the overlap characteristics | ||
| 622 | */ | ||
| 623 | |||
| 624 | list_for_each_entry_safe(pl, next, poison_list, list) { | ||
| 625 | u64 pl_end = pl->start + pl->length - 1; | ||
| 626 | |||
| 627 | /* Skip intervals with no intersection */ | ||
| 628 | if (pl_end < start) | ||
| 629 | continue; | ||
| 630 | if (pl->start > clr_end) | ||
| 631 | continue; | ||
| 632 | /* Delete completely overlapped poison entries */ | ||
| 633 | if ((pl->start >= start) && (pl_end <= clr_end)) { | ||
| 634 | list_del(&pl->list); | ||
| 635 | kfree(pl); | ||
| 636 | continue; | ||
| 637 | } | ||
| 638 | /* Adjust start point of partially cleared entries */ | ||
| 639 | if ((start <= pl->start) && (clr_end > pl->start)) { | ||
| 640 | pl->length -= clr_end - pl->start + 1; | ||
| 641 | pl->start = clr_end + 1; | ||
| 642 | continue; | ||
| 643 | } | ||
| 644 | /* Adjust pl->length for partial clearing at the tail end */ | ||
| 645 | if ((pl->start < start) && (pl_end <= clr_end)) { | ||
| 646 | /* pl->start remains the same */ | ||
| 647 | pl->length = start - pl->start; | ||
| 648 | continue; | ||
| 649 | } | ||
| 650 | /* | ||
| 651 | * If clearing in the middle of an entry, we split it into | ||
| 652 | * two by modifying the current entry to represent one half of | ||
| 653 | * the split, and adding a new entry for the second half. | ||
| 654 | */ | ||
| 655 | if ((pl->start < start) && (pl_end > clr_end)) { | ||
| 656 | u64 new_start = clr_end + 1; | ||
| 657 | u64 new_len = pl_end - new_start + 1; | ||
| 658 | |||
| 659 | /* Add new entry covering the right half */ | ||
| 660 | add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO); | ||
| 661 | /* Adjust this entry to cover the left half */ | ||
| 662 | pl->length = start - pl->start; | ||
| 663 | continue; | ||
| 664 | } | ||
| 665 | } | ||
| 666 | nvdimm_bus_unlock(&nvdimm_bus->dev); | ||
| 667 | } | ||
| 668 | EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list); | ||
| 669 | |||
| 605 | #ifdef CONFIG_BLK_DEV_INTEGRITY | 670 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
| 606 | int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) | 671 | int nd_integrity_init(struct gendisk *disk, unsigned long meta_size) |
| 607 | { | 672 | { |
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 71d12bb67339..619834e144d1 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c | |||
| @@ -26,6 +26,14 @@ static int nvdimm_probe(struct device *dev) | |||
| 26 | struct nvdimm_drvdata *ndd; | 26 | struct nvdimm_drvdata *ndd; |
| 27 | int rc; | 27 | int rc; |
| 28 | 28 | ||
| 29 | rc = nvdimm_check_config_data(dev); | ||
| 30 | if (rc) { | ||
| 31 | /* not required for non-aliased nvdimm, ex. NVDIMM-N */ | ||
| 32 | if (rc == -ENOTTY) | ||
| 33 | rc = 0; | ||
| 34 | return rc; | ||
| 35 | } | ||
| 36 | |||
| 29 | ndd = kzalloc(sizeof(*ndd), GFP_KERNEL); | 37 | ndd = kzalloc(sizeof(*ndd), GFP_KERNEL); |
| 30 | if (!ndd) | 38 | if (!ndd) |
| 31 | return -ENOMEM; | 39 | return -ENOMEM; |
| @@ -72,6 +80,9 @@ static int nvdimm_remove(struct device *dev) | |||
| 72 | { | 80 | { |
| 73 | struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); | 81 | struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); |
| 74 | 82 | ||
| 83 | if (!ndd) | ||
| 84 | return 0; | ||
| 85 | |||
| 75 | nvdimm_bus_lock(dev); | 86 | nvdimm_bus_lock(dev); |
| 76 | dev_set_drvdata(dev, NULL); | 87 | dev_set_drvdata(dev, NULL); |
| 77 | nvdimm_bus_unlock(dev); | 88 | nvdimm_bus_unlock(dev); |
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index d9bba5edd8dc..d614493ad5ac 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
| @@ -28,28 +28,30 @@ static DEFINE_IDA(dimm_ida); | |||
| 28 | * Retrieve bus and dimm handle and return if this bus supports | 28 | * Retrieve bus and dimm handle and return if this bus supports |
| 29 | * get_config_data commands | 29 | * get_config_data commands |
| 30 | */ | 30 | */ |
| 31 | static int __validate_dimm(struct nvdimm_drvdata *ndd) | 31 | int nvdimm_check_config_data(struct device *dev) |
| 32 | { | 32 | { |
| 33 | struct nvdimm *nvdimm; | 33 | struct nvdimm *nvdimm = to_nvdimm(dev); |
| 34 | |||
| 35 | if (!ndd) | ||
| 36 | return -EINVAL; | ||
| 37 | |||
| 38 | nvdimm = to_nvdimm(ndd->dev); | ||
| 39 | 34 | ||
| 40 | if (!nvdimm->cmd_mask) | 35 | if (!nvdimm->cmd_mask || |
| 41 | return -ENXIO; | 36 | !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) { |
| 42 | if (!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) | 37 | if (nvdimm->flags & NDD_ALIASING) |
| 43 | return -ENXIO; | 38 | return -ENXIO; |
| 39 | else | ||
| 40 | return -ENOTTY; | ||
| 41 | } | ||
| 44 | 42 | ||
| 45 | return 0; | 43 | return 0; |
| 46 | } | 44 | } |
| 47 | 45 | ||
| 48 | static int validate_dimm(struct nvdimm_drvdata *ndd) | 46 | static int validate_dimm(struct nvdimm_drvdata *ndd) |
| 49 | { | 47 | { |
| 50 | int rc = __validate_dimm(ndd); | 48 | int rc; |
| 51 | 49 | ||
| 52 | if (rc && ndd) | 50 | if (!ndd) |
| 51 | return -EINVAL; | ||
| 52 | |||
| 53 | rc = nvdimm_check_config_data(ndd->dev); | ||
| 54 | if (rc) | ||
| 53 | dev_dbg(ndd->dev, "%pf: %s error: %d\n", | 55 | dev_dbg(ndd->dev, "%pf: %s error: %d\n", |
| 54 | __builtin_return_address(0), __func__, rc); | 56 | __builtin_return_address(0), __func__, rc); |
| 55 | return rc; | 57 | return rc; |
| @@ -263,6 +265,12 @@ const char *nvdimm_name(struct nvdimm *nvdimm) | |||
| 263 | } | 265 | } |
| 264 | EXPORT_SYMBOL_GPL(nvdimm_name); | 266 | EXPORT_SYMBOL_GPL(nvdimm_name); |
| 265 | 267 | ||
| 268 | struct kobject *nvdimm_kobj(struct nvdimm *nvdimm) | ||
| 269 | { | ||
| 270 | return &nvdimm->dev.kobj; | ||
| 271 | } | ||
| 272 | EXPORT_SYMBOL_GPL(nvdimm_kobj); | ||
| 273 | |||
| 266 | unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm) | 274 | unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm) |
| 267 | { | 275 | { |
| 268 | return nvdimm->cmd_mask; | 276 | return nvdimm->cmd_mask; |
| @@ -378,40 +386,166 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, | |||
| 378 | } | 386 | } |
| 379 | EXPORT_SYMBOL_GPL(nvdimm_create); | 387 | EXPORT_SYMBOL_GPL(nvdimm_create); |
| 380 | 388 | ||
| 389 | int alias_dpa_busy(struct device *dev, void *data) | ||
| 390 | { | ||
| 391 | resource_size_t map_end, blk_start, new, busy; | ||
| 392 | struct blk_alloc_info *info = data; | ||
| 393 | struct nd_mapping *nd_mapping; | ||
| 394 | struct nd_region *nd_region; | ||
| 395 | struct nvdimm_drvdata *ndd; | ||
| 396 | struct resource *res; | ||
| 397 | int i; | ||
| 398 | |||
| 399 | if (!is_nd_pmem(dev)) | ||
| 400 | return 0; | ||
| 401 | |||
| 402 | nd_region = to_nd_region(dev); | ||
| 403 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 404 | nd_mapping = &nd_region->mapping[i]; | ||
| 405 | if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) | ||
| 406 | break; | ||
| 407 | } | ||
| 408 | |||
| 409 | if (i >= nd_region->ndr_mappings) | ||
| 410 | return 0; | ||
| 411 | |||
| 412 | ndd = to_ndd(nd_mapping); | ||
| 413 | map_end = nd_mapping->start + nd_mapping->size - 1; | ||
| 414 | blk_start = nd_mapping->start; | ||
| 415 | |||
| 416 | /* | ||
| 417 | * In the allocation case ->res is set to free space that we are | ||
| 418 | * looking to validate against PMEM aliasing collision rules | ||
| 419 | * (i.e. BLK is allocated after all aliased PMEM). | ||
| 420 | */ | ||
| 421 | if (info->res) { | ||
| 422 | if (info->res->start >= nd_mapping->start | ||
| 423 | && info->res->start < map_end) | ||
| 424 | /* pass */; | ||
| 425 | else | ||
| 426 | return 0; | ||
| 427 | } | ||
| 428 | |||
| 429 | retry: | ||
| 430 | /* | ||
| 431 | * Find the free dpa from the end of the last pmem allocation to | ||
| 432 | * the end of the interleave-set mapping that is not already | ||
| 433 | * covered by a blk allocation. | ||
| 434 | */ | ||
| 435 | busy = 0; | ||
| 436 | for_each_dpa_resource(ndd, res) { | ||
| 437 | if ((res->start >= blk_start && res->start < map_end) | ||
| 438 | || (res->end >= blk_start | ||
| 439 | && res->end <= map_end)) { | ||
| 440 | if (strncmp(res->name, "pmem", 4) == 0) { | ||
| 441 | new = max(blk_start, min(map_end + 1, | ||
| 442 | res->end + 1)); | ||
| 443 | if (new != blk_start) { | ||
| 444 | blk_start = new; | ||
| 445 | goto retry; | ||
| 446 | } | ||
| 447 | } else | ||
| 448 | busy += min(map_end, res->end) | ||
| 449 | - max(nd_mapping->start, res->start) + 1; | ||
| 450 | } else if (nd_mapping->start > res->start | ||
| 451 | && map_end < res->end) { | ||
| 452 | /* total eclipse of the PMEM region mapping */ | ||
| 453 | busy += nd_mapping->size; | ||
| 454 | break; | ||
| 455 | } | ||
| 456 | } | ||
| 457 | |||
| 458 | /* update the free space range with the probed blk_start */ | ||
| 459 | if (info->res && blk_start > info->res->start) { | ||
| 460 | info->res->start = max(info->res->start, blk_start); | ||
| 461 | if (info->res->start > info->res->end) | ||
| 462 | info->res->end = info->res->start - 1; | ||
| 463 | return 1; | ||
| 464 | } | ||
| 465 | |||
| 466 | info->available -= blk_start - nd_mapping->start + busy; | ||
| 467 | |||
| 468 | return 0; | ||
| 469 | } | ||
| 470 | |||
| 471 | static int blk_dpa_busy(struct device *dev, void *data) | ||
| 472 | { | ||
| 473 | struct blk_alloc_info *info = data; | ||
| 474 | struct nd_mapping *nd_mapping; | ||
| 475 | struct nd_region *nd_region; | ||
| 476 | resource_size_t map_end; | ||
| 477 | int i; | ||
| 478 | |||
| 479 | if (!is_nd_pmem(dev)) | ||
| 480 | return 0; | ||
| 481 | |||
| 482 | nd_region = to_nd_region(dev); | ||
| 483 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 484 | nd_mapping = &nd_region->mapping[i]; | ||
| 485 | if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) | ||
| 486 | break; | ||
| 487 | } | ||
| 488 | |||
| 489 | if (i >= nd_region->ndr_mappings) | ||
| 490 | return 0; | ||
| 491 | |||
| 492 | map_end = nd_mapping->start + nd_mapping->size - 1; | ||
| 493 | if (info->res->start >= nd_mapping->start | ||
| 494 | && info->res->start < map_end) { | ||
| 495 | if (info->res->end <= map_end) { | ||
| 496 | info->busy = 0; | ||
| 497 | return 1; | ||
| 498 | } else { | ||
| 499 | info->busy -= info->res->end - map_end; | ||
| 500 | return 0; | ||
| 501 | } | ||
| 502 | } else if (info->res->end >= nd_mapping->start | ||
| 503 | && info->res->end <= map_end) { | ||
| 504 | info->busy -= nd_mapping->start - info->res->start; | ||
| 505 | return 0; | ||
| 506 | } else { | ||
| 507 | info->busy -= nd_mapping->size; | ||
| 508 | return 0; | ||
| 509 | } | ||
| 510 | } | ||
| 511 | |||
| 381 | /** | 512 | /** |
| 382 | * nd_blk_available_dpa - account the unused dpa of BLK region | 513 | * nd_blk_available_dpa - account the unused dpa of BLK region |
| 383 | * @nd_mapping: container of dpa-resource-root + labels | 514 | * @nd_mapping: container of dpa-resource-root + labels |
| 384 | * | 515 | * |
| 385 | * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges. | 516 | * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but |
| 517 | * we arrange for them to never start at an lower dpa than the last | ||
| 518 | * PMEM allocation in an aliased region. | ||
| 386 | */ | 519 | */ |
| 387 | resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping) | 520 | resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) |
| 388 | { | 521 | { |
| 522 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); | ||
| 523 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
| 389 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 524 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 390 | resource_size_t map_end, busy = 0, available; | 525 | struct blk_alloc_info info = { |
| 526 | .nd_mapping = nd_mapping, | ||
| 527 | .available = nd_mapping->size, | ||
| 528 | .res = NULL, | ||
| 529 | }; | ||
| 391 | struct resource *res; | 530 | struct resource *res; |
| 392 | 531 | ||
| 393 | if (!ndd) | 532 | if (!ndd) |
| 394 | return 0; | 533 | return 0; |
| 395 | 534 | ||
| 396 | map_end = nd_mapping->start + nd_mapping->size - 1; | 535 | device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); |
| 397 | for_each_dpa_resource(ndd, res) | ||
| 398 | if (res->start >= nd_mapping->start && res->start < map_end) { | ||
| 399 | resource_size_t end = min(map_end, res->end); | ||
| 400 | 536 | ||
| 401 | busy += end - res->start + 1; | 537 | /* now account for busy blk allocations in unaliased dpa */ |
| 402 | } else if (res->end >= nd_mapping->start | 538 | for_each_dpa_resource(ndd, res) { |
| 403 | && res->end <= map_end) { | 539 | if (strncmp(res->name, "blk", 3) != 0) |
| 404 | busy += res->end - nd_mapping->start; | 540 | continue; |
| 405 | } else if (nd_mapping->start > res->start | ||
| 406 | && nd_mapping->start < res->end) { | ||
| 407 | /* total eclipse of the BLK region mapping */ | ||
| 408 | busy += nd_mapping->size; | ||
| 409 | } | ||
| 410 | 541 | ||
| 411 | available = map_end - nd_mapping->start + 1; | 542 | info.res = res; |
| 412 | if (busy < available) | 543 | info.busy = resource_size(res); |
| 413 | return available - busy; | 544 | device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy); |
| 414 | return 0; | 545 | info.available -= info.busy; |
| 546 | } | ||
| 547 | |||
| 548 | return info.available; | ||
| 415 | } | 549 | } |
| 416 | 550 | ||
| 417 | /** | 551 | /** |
| @@ -443,21 +577,16 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, | |||
| 443 | map_start = nd_mapping->start; | 577 | map_start = nd_mapping->start; |
| 444 | map_end = map_start + nd_mapping->size - 1; | 578 | map_end = map_start + nd_mapping->size - 1; |
| 445 | blk_start = max(map_start, map_end + 1 - *overlap); | 579 | blk_start = max(map_start, map_end + 1 - *overlap); |
| 446 | for_each_dpa_resource(ndd, res) | 580 | for_each_dpa_resource(ndd, res) { |
| 447 | if (res->start >= map_start && res->start < map_end) { | 581 | if (res->start >= map_start && res->start < map_end) { |
| 448 | if (strncmp(res->name, "blk", 3) == 0) | 582 | if (strncmp(res->name, "blk", 3) == 0) |
| 449 | blk_start = min(blk_start, res->start); | 583 | blk_start = min(blk_start, |
| 450 | else if (res->start != map_start) { | 584 | max(map_start, res->start)); |
| 585 | else if (res->end > map_end) { | ||
| 451 | reason = "misaligned to iset"; | 586 | reason = "misaligned to iset"; |
| 452 | goto err; | 587 | goto err; |
| 453 | } else { | 588 | } else |
| 454 | if (busy) { | ||
| 455 | reason = "duplicate overlapping PMEM reservations?"; | ||
| 456 | goto err; | ||
| 457 | } | ||
| 458 | busy += resource_size(res); | 589 | busy += resource_size(res); |
| 459 | continue; | ||
| 460 | } | ||
| 461 | } else if (res->end >= map_start && res->end <= map_end) { | 590 | } else if (res->end >= map_start && res->end <= map_end) { |
| 462 | if (strncmp(res->name, "blk", 3) == 0) { | 591 | if (strncmp(res->name, "blk", 3) == 0) { |
| 463 | /* | 592 | /* |
| @@ -466,15 +595,14 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, | |||
| 466 | * be used for BLK. | 595 | * be used for BLK. |
| 467 | */ | 596 | */ |
| 468 | blk_start = map_start; | 597 | blk_start = map_start; |
| 469 | } else { | 598 | } else |
| 470 | reason = "misaligned to iset"; | 599 | busy += resource_size(res); |
| 471 | goto err; | ||
| 472 | } | ||
| 473 | } else if (map_start > res->start && map_start < res->end) { | 600 | } else if (map_start > res->start && map_start < res->end) { |
| 474 | /* total eclipse of the mapping */ | 601 | /* total eclipse of the mapping */ |
| 475 | busy += nd_mapping->size; | 602 | busy += nd_mapping->size; |
| 476 | blk_start = map_start; | 603 | blk_start = map_start; |
| 477 | } | 604 | } |
| 605 | } | ||
| 478 | 606 | ||
| 479 | *overlap = map_end + 1 - blk_start; | 607 | *overlap = map_end + 1 - blk_start; |
| 480 | available = blk_start - map_start; | 608 | available = blk_start - map_start; |
| @@ -483,10 +611,6 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, | |||
| 483 | return 0; | 611 | return 0; |
| 484 | 612 | ||
| 485 | err: | 613 | err: |
| 486 | /* | ||
| 487 | * Something is wrong, PMEM must align with the start of the | ||
| 488 | * interleave set, and there can only be one allocation per set. | ||
| 489 | */ | ||
| 490 | nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason); | 614 | nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason); |
| 491 | return 0; | 615 | return 0; |
| 492 | } | 616 | } |
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 96526dcfdd37..fac7cabe8f56 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c | |||
| @@ -494,11 +494,13 @@ static int __pmem_label_update(struct nd_region *nd_region, | |||
| 494 | struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, | 494 | struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, |
| 495 | int pos) | 495 | int pos) |
| 496 | { | 496 | { |
| 497 | u64 cookie = nd_region_interleave_set_cookie(nd_region), rawsize; | 497 | u64 cookie = nd_region_interleave_set_cookie(nd_region); |
| 498 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 498 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 499 | struct nd_namespace_label *victim_label; | 499 | struct nd_label_ent *label_ent, *victim = NULL; |
| 500 | struct nd_namespace_label *nd_label; | 500 | struct nd_namespace_label *nd_label; |
| 501 | struct nd_namespace_index *nsindex; | 501 | struct nd_namespace_index *nsindex; |
| 502 | struct nd_label_id label_id; | ||
| 503 | struct resource *res; | ||
| 502 | unsigned long *free; | 504 | unsigned long *free; |
| 503 | u32 nslot, slot; | 505 | u32 nslot, slot; |
| 504 | size_t offset; | 506 | size_t offset; |
| @@ -507,6 +509,16 @@ static int __pmem_label_update(struct nd_region *nd_region, | |||
| 507 | if (!preamble_next(ndd, &nsindex, &free, &nslot)) | 509 | if (!preamble_next(ndd, &nsindex, &free, &nslot)) |
| 508 | return -ENXIO; | 510 | return -ENXIO; |
| 509 | 511 | ||
| 512 | nd_label_gen_id(&label_id, nspm->uuid, 0); | ||
| 513 | for_each_dpa_resource(ndd, res) | ||
| 514 | if (strcmp(res->name, label_id.id) == 0) | ||
| 515 | break; | ||
| 516 | |||
| 517 | if (!res) { | ||
| 518 | WARN_ON_ONCE(1); | ||
| 519 | return -ENXIO; | ||
| 520 | } | ||
| 521 | |||
| 510 | /* allocate and write the label to the staging (next) index */ | 522 | /* allocate and write the label to the staging (next) index */ |
| 511 | slot = nd_label_alloc_slot(ndd); | 523 | slot = nd_label_alloc_slot(ndd); |
| 512 | if (slot == UINT_MAX) | 524 | if (slot == UINT_MAX) |
| @@ -522,11 +534,10 @@ static int __pmem_label_update(struct nd_region *nd_region, | |||
| 522 | nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); | 534 | nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); |
| 523 | nd_label->position = __cpu_to_le16(pos); | 535 | nd_label->position = __cpu_to_le16(pos); |
| 524 | nd_label->isetcookie = __cpu_to_le64(cookie); | 536 | nd_label->isetcookie = __cpu_to_le64(cookie); |
| 525 | rawsize = div_u64(resource_size(&nspm->nsio.res), | 537 | nd_label->rawsize = __cpu_to_le64(resource_size(res)); |
| 526 | nd_region->ndr_mappings); | 538 | nd_label->dpa = __cpu_to_le64(res->start); |
| 527 | nd_label->rawsize = __cpu_to_le64(rawsize); | ||
| 528 | nd_label->dpa = __cpu_to_le64(nd_mapping->start); | ||
| 529 | nd_label->slot = __cpu_to_le32(slot); | 539 | nd_label->slot = __cpu_to_le32(slot); |
| 540 | nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__); | ||
| 530 | 541 | ||
| 531 | /* update label */ | 542 | /* update label */ |
| 532 | offset = nd_label_offset(ndd, nd_label); | 543 | offset = nd_label_offset(ndd, nd_label); |
| @@ -536,38 +547,43 @@ static int __pmem_label_update(struct nd_region *nd_region, | |||
| 536 | return rc; | 547 | return rc; |
| 537 | 548 | ||
| 538 | /* Garbage collect the previous label */ | 549 | /* Garbage collect the previous label */ |
| 539 | victim_label = nd_mapping->labels[0]; | 550 | mutex_lock(&nd_mapping->lock); |
| 540 | if (victim_label) { | 551 | list_for_each_entry(label_ent, &nd_mapping->labels, list) { |
| 541 | slot = to_slot(ndd, victim_label); | 552 | if (!label_ent->label) |
| 542 | nd_label_free_slot(ndd, slot); | 553 | continue; |
| 554 | if (memcmp(nspm->uuid, label_ent->label->uuid, | ||
| 555 | NSLABEL_UUID_LEN) != 0) | ||
| 556 | continue; | ||
| 557 | victim = label_ent; | ||
| 558 | list_move_tail(&victim->list, &nd_mapping->labels); | ||
| 559 | break; | ||
| 560 | } | ||
| 561 | if (victim) { | ||
| 543 | dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); | 562 | dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); |
| 563 | slot = to_slot(ndd, victim->label); | ||
| 564 | nd_label_free_slot(ndd, slot); | ||
| 565 | victim->label = NULL; | ||
| 544 | } | 566 | } |
| 545 | 567 | ||
| 546 | /* update index */ | 568 | /* update index */ |
| 547 | rc = nd_label_write_index(ndd, ndd->ns_next, | 569 | rc = nd_label_write_index(ndd, ndd->ns_next, |
| 548 | nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); | 570 | nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); |
| 549 | if (rc < 0) | 571 | if (rc == 0) { |
| 550 | return rc; | 572 | list_for_each_entry(label_ent, &nd_mapping->labels, list) |
| 551 | 573 | if (!label_ent->label) { | |
| 552 | nd_mapping->labels[0] = nd_label; | 574 | label_ent->label = nd_label; |
| 553 | 575 | nd_label = NULL; | |
| 554 | return 0; | 576 | break; |
| 555 | } | 577 | } |
| 556 | 578 | dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label, | |
| 557 | static void del_label(struct nd_mapping *nd_mapping, int l) | 579 | "failed to track label: %d\n", |
| 558 | { | 580 | to_slot(ndd, nd_label)); |
| 559 | struct nd_namespace_label *next_label, *nd_label; | 581 | if (nd_label) |
| 560 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 582 | rc = -ENXIO; |
| 561 | unsigned int slot; | 583 | } |
| 562 | int j; | 584 | mutex_unlock(&nd_mapping->lock); |
| 563 | |||
| 564 | nd_label = nd_mapping->labels[l]; | ||
| 565 | slot = to_slot(ndd, nd_label); | ||
| 566 | dev_vdbg(ndd->dev, "%s: clear: %d\n", __func__, slot); | ||
| 567 | 585 | ||
| 568 | for (j = l; (next_label = nd_mapping->labels[j + 1]); j++) | 586 | return rc; |
| 569 | nd_mapping->labels[j] = next_label; | ||
| 570 | nd_mapping->labels[j] = NULL; | ||
| 571 | } | 587 | } |
| 572 | 588 | ||
| 573 | static bool is_old_resource(struct resource *res, struct resource **list, int n) | 589 | static bool is_old_resource(struct resource *res, struct resource **list, int n) |
| @@ -607,14 +623,16 @@ static int __blk_label_update(struct nd_region *nd_region, | |||
| 607 | struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk, | 623 | struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk, |
| 608 | int num_labels) | 624 | int num_labels) |
| 609 | { | 625 | { |
| 610 | int i, l, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO; | 626 | int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO; |
| 611 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 627 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 612 | struct nd_namespace_label *nd_label; | 628 | struct nd_namespace_label *nd_label; |
| 629 | struct nd_label_ent *label_ent, *e; | ||
| 613 | struct nd_namespace_index *nsindex; | 630 | struct nd_namespace_index *nsindex; |
| 614 | unsigned long *free, *victim_map = NULL; | 631 | unsigned long *free, *victim_map = NULL; |
| 615 | struct resource *res, **old_res_list; | 632 | struct resource *res, **old_res_list; |
| 616 | struct nd_label_id label_id; | 633 | struct nd_label_id label_id; |
| 617 | u8 uuid[NSLABEL_UUID_LEN]; | 634 | u8 uuid[NSLABEL_UUID_LEN]; |
| 635 | LIST_HEAD(list); | ||
| 618 | u32 nslot, slot; | 636 | u32 nslot, slot; |
| 619 | 637 | ||
| 620 | if (!preamble_next(ndd, &nsindex, &free, &nslot)) | 638 | if (!preamble_next(ndd, &nsindex, &free, &nslot)) |
| @@ -736,15 +754,22 @@ static int __blk_label_update(struct nd_region *nd_region, | |||
| 736 | * entries in nd_mapping->labels | 754 | * entries in nd_mapping->labels |
| 737 | */ | 755 | */ |
| 738 | nlabel = 0; | 756 | nlabel = 0; |
| 739 | for_each_label(l, nd_label, nd_mapping->labels) { | 757 | mutex_lock(&nd_mapping->lock); |
| 758 | list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { | ||
| 759 | nd_label = label_ent->label; | ||
| 760 | if (!nd_label) | ||
| 761 | continue; | ||
| 740 | nlabel++; | 762 | nlabel++; |
| 741 | memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); | 763 | memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); |
| 742 | if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0) | 764 | if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0) |
| 743 | continue; | 765 | continue; |
| 744 | nlabel--; | 766 | nlabel--; |
| 745 | del_label(nd_mapping, l); | 767 | list_move(&label_ent->list, &list); |
| 746 | l--; /* retry with the new label at this index */ | 768 | label_ent->label = NULL; |
| 747 | } | 769 | } |
| 770 | list_splice_tail_init(&list, &nd_mapping->labels); | ||
| 771 | mutex_unlock(&nd_mapping->lock); | ||
| 772 | |||
| 748 | if (nlabel + nsblk->num_resources > num_labels) { | 773 | if (nlabel + nsblk->num_resources > num_labels) { |
| 749 | /* | 774 | /* |
| 750 | * Bug, we can't end up with more resources than | 775 | * Bug, we can't end up with more resources than |
| @@ -755,6 +780,15 @@ static int __blk_label_update(struct nd_region *nd_region, | |||
| 755 | goto out; | 780 | goto out; |
| 756 | } | 781 | } |
| 757 | 782 | ||
| 783 | mutex_lock(&nd_mapping->lock); | ||
| 784 | label_ent = list_first_entry_or_null(&nd_mapping->labels, | ||
| 785 | typeof(*label_ent), list); | ||
| 786 | if (!label_ent) { | ||
| 787 | WARN_ON(1); | ||
| 788 | mutex_unlock(&nd_mapping->lock); | ||
| 789 | rc = -ENXIO; | ||
| 790 | goto out; | ||
| 791 | } | ||
| 758 | for_each_clear_bit_le(slot, free, nslot) { | 792 | for_each_clear_bit_le(slot, free, nslot) { |
| 759 | nd_label = nd_label_base(ndd) + slot; | 793 | nd_label = nd_label_base(ndd) + slot; |
| 760 | memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); | 794 | memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN); |
| @@ -762,11 +796,19 @@ static int __blk_label_update(struct nd_region *nd_region, | |||
| 762 | continue; | 796 | continue; |
| 763 | res = to_resource(ndd, nd_label); | 797 | res = to_resource(ndd, nd_label); |
| 764 | res->flags &= ~DPA_RESOURCE_ADJUSTED; | 798 | res->flags &= ~DPA_RESOURCE_ADJUSTED; |
| 765 | dev_vdbg(&nsblk->common.dev, "assign label[%d] slot: %d\n", | 799 | dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot); |
| 766 | l, slot); | 800 | list_for_each_entry_from(label_ent, &nd_mapping->labels, list) { |
| 767 | nd_mapping->labels[l++] = nd_label; | 801 | if (label_ent->label) |
| 802 | continue; | ||
| 803 | label_ent->label = nd_label; | ||
| 804 | nd_label = NULL; | ||
| 805 | break; | ||
| 806 | } | ||
| 807 | if (nd_label) | ||
| 808 | dev_WARN(&nsblk->common.dev, | ||
| 809 | "failed to track label slot%d\n", slot); | ||
| 768 | } | 810 | } |
| 769 | nd_mapping->labels[l] = NULL; | 811 | mutex_unlock(&nd_mapping->lock); |
| 770 | 812 | ||
| 771 | out: | 813 | out: |
| 772 | kfree(old_res_list); | 814 | kfree(old_res_list); |
| @@ -788,32 +830,28 @@ static int __blk_label_update(struct nd_region *nd_region, | |||
| 788 | 830 | ||
| 789 | static int init_labels(struct nd_mapping *nd_mapping, int num_labels) | 831 | static int init_labels(struct nd_mapping *nd_mapping, int num_labels) |
| 790 | { | 832 | { |
| 791 | int i, l, old_num_labels = 0; | 833 | int i, old_num_labels = 0; |
| 834 | struct nd_label_ent *label_ent; | ||
| 792 | struct nd_namespace_index *nsindex; | 835 | struct nd_namespace_index *nsindex; |
| 793 | struct nd_namespace_label *nd_label; | ||
| 794 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 836 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 795 | size_t size = (num_labels + 1) * sizeof(struct nd_namespace_label *); | ||
| 796 | 837 | ||
| 797 | for_each_label(l, nd_label, nd_mapping->labels) | 838 | mutex_lock(&nd_mapping->lock); |
| 839 | list_for_each_entry(label_ent, &nd_mapping->labels, list) | ||
| 798 | old_num_labels++; | 840 | old_num_labels++; |
| 841 | mutex_unlock(&nd_mapping->lock); | ||
| 799 | 842 | ||
| 800 | /* | 843 | /* |
| 801 | * We need to preserve all the old labels for the mapping so | 844 | * We need to preserve all the old labels for the mapping so |
| 802 | * they can be garbage collected after writing the new labels. | 845 | * they can be garbage collected after writing the new labels. |
| 803 | */ | 846 | */ |
| 804 | if (num_labels > old_num_labels) { | 847 | for (i = old_num_labels; i < num_labels; i++) { |
| 805 | struct nd_namespace_label **labels; | 848 | label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL); |
| 806 | 849 | if (!label_ent) | |
| 807 | labels = krealloc(nd_mapping->labels, size, GFP_KERNEL); | ||
| 808 | if (!labels) | ||
| 809 | return -ENOMEM; | 850 | return -ENOMEM; |
| 810 | nd_mapping->labels = labels; | 851 | mutex_lock(&nd_mapping->lock); |
| 852 | list_add_tail(&label_ent->list, &nd_mapping->labels); | ||
| 853 | mutex_unlock(&nd_mapping->lock); | ||
| 811 | } | 854 | } |
| 812 | if (!nd_mapping->labels) | ||
| 813 | return -ENOMEM; | ||
| 814 | |||
| 815 | for (i = old_num_labels; i <= num_labels; i++) | ||
| 816 | nd_mapping->labels[i] = NULL; | ||
| 817 | 855 | ||
| 818 | if (ndd->ns_current == -1 || ndd->ns_next == -1) | 856 | if (ndd->ns_current == -1 || ndd->ns_next == -1) |
| 819 | /* pass */; | 857 | /* pass */; |
| @@ -837,42 +875,45 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels) | |||
| 837 | static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) | 875 | static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) |
| 838 | { | 876 | { |
| 839 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 877 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 840 | struct nd_namespace_label *nd_label; | 878 | struct nd_label_ent *label_ent, *e; |
| 841 | struct nd_namespace_index *nsindex; | 879 | struct nd_namespace_index *nsindex; |
| 842 | u8 label_uuid[NSLABEL_UUID_LEN]; | 880 | u8 label_uuid[NSLABEL_UUID_LEN]; |
| 843 | int l, num_freed = 0; | ||
| 844 | unsigned long *free; | 881 | unsigned long *free; |
| 882 | LIST_HEAD(list); | ||
| 845 | u32 nslot, slot; | 883 | u32 nslot, slot; |
| 884 | int active = 0; | ||
| 846 | 885 | ||
| 847 | if (!uuid) | 886 | if (!uuid) |
| 848 | return 0; | 887 | return 0; |
| 849 | 888 | ||
| 850 | /* no index || no labels == nothing to delete */ | 889 | /* no index || no labels == nothing to delete */ |
| 851 | if (!preamble_next(ndd, &nsindex, &free, &nslot) | 890 | if (!preamble_next(ndd, &nsindex, &free, &nslot)) |
| 852 | || !nd_mapping->labels) | ||
| 853 | return 0; | 891 | return 0; |
| 854 | 892 | ||
| 855 | for_each_label(l, nd_label, nd_mapping->labels) { | 893 | mutex_lock(&nd_mapping->lock); |
| 894 | list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { | ||
| 895 | struct nd_namespace_label *nd_label = label_ent->label; | ||
| 896 | |||
| 897 | if (!nd_label) | ||
| 898 | continue; | ||
| 899 | active++; | ||
| 856 | memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); | 900 | memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); |
| 857 | if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0) | 901 | if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0) |
| 858 | continue; | 902 | continue; |
| 903 | active--; | ||
| 859 | slot = to_slot(ndd, nd_label); | 904 | slot = to_slot(ndd, nd_label); |
| 860 | nd_label_free_slot(ndd, slot); | 905 | nd_label_free_slot(ndd, slot); |
| 861 | dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); | 906 | dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); |
| 862 | del_label(nd_mapping, l); | 907 | list_move_tail(&label_ent->list, &list); |
| 863 | num_freed++; | 908 | label_ent->label = NULL; |
| 864 | l--; /* retry with new label at this index */ | ||
| 865 | } | 909 | } |
| 910 | list_splice_tail_init(&list, &nd_mapping->labels); | ||
| 866 | 911 | ||
| 867 | if (num_freed > l) { | 912 | if (active == 0) { |
| 868 | /* | 913 | nd_mapping_free_labels(nd_mapping); |
| 869 | * num_freed will only ever be > l when we delete the last | 914 | dev_dbg(ndd->dev, "%s: no more active labels\n", __func__); |
| 870 | * label | ||
| 871 | */ | ||
| 872 | kfree(nd_mapping->labels); | ||
| 873 | nd_mapping->labels = NULL; | ||
| 874 | dev_dbg(ndd->dev, "%s: no more labels\n", __func__); | ||
| 875 | } | 915 | } |
| 916 | mutex_unlock(&nd_mapping->lock); | ||
| 876 | 917 | ||
| 877 | return nd_label_write_index(ndd, ndd->ns_next, | 918 | return nd_label_write_index(ndd, ndd->ns_next, |
| 878 | nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); | 919 | nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); |
| @@ -885,7 +926,9 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, | |||
| 885 | 926 | ||
| 886 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 927 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
| 887 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 928 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
| 888 | int rc; | 929 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 930 | struct resource *res; | ||
| 931 | int rc, count = 0; | ||
| 889 | 932 | ||
| 890 | if (size == 0) { | 933 | if (size == 0) { |
| 891 | rc = del_labels(nd_mapping, nspm->uuid); | 934 | rc = del_labels(nd_mapping, nspm->uuid); |
| @@ -894,7 +937,12 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, | |||
| 894 | continue; | 937 | continue; |
| 895 | } | 938 | } |
| 896 | 939 | ||
| 897 | rc = init_labels(nd_mapping, 1); | 940 | for_each_dpa_resource(ndd, res) |
| 941 | if (strncmp(res->name, "pmem", 3) == 0) | ||
| 942 | count++; | ||
| 943 | WARN_ON_ONCE(!count); | ||
| 944 | |||
| 945 | rc = init_labels(nd_mapping, count); | ||
| 898 | if (rc < 0) | 946 | if (rc < 0) |
| 899 | return rc; | 947 | return rc; |
| 900 | 948 | ||
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index c5e3196c45b0..3509cff68ef9 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
| @@ -12,8 +12,10 @@ | |||
| 12 | */ | 12 | */ |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
| 15 | #include <linux/sort.h> | ||
| 15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
| 16 | #include <linux/pmem.h> | 17 | #include <linux/pmem.h> |
| 18 | #include <linux/list.h> | ||
| 17 | #include <linux/nd.h> | 19 | #include <linux/nd.h> |
| 18 | #include "nd-core.h" | 20 | #include "nd-core.h" |
| 19 | #include "nd.h" | 21 | #include "nd.h" |
| @@ -28,7 +30,10 @@ static void namespace_io_release(struct device *dev) | |||
| 28 | static void namespace_pmem_release(struct device *dev) | 30 | static void namespace_pmem_release(struct device *dev) |
| 29 | { | 31 | { |
| 30 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); | 32 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); |
| 33 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
| 31 | 34 | ||
| 35 | if (nspm->id >= 0) | ||
| 36 | ida_simple_remove(&nd_region->ns_ida, nspm->id); | ||
| 32 | kfree(nspm->alt_name); | 37 | kfree(nspm->alt_name); |
| 33 | kfree(nspm->uuid); | 38 | kfree(nspm->uuid); |
| 34 | kfree(nspm); | 39 | kfree(nspm); |
| @@ -62,17 +67,17 @@ static struct device_type namespace_blk_device_type = { | |||
| 62 | .release = namespace_blk_release, | 67 | .release = namespace_blk_release, |
| 63 | }; | 68 | }; |
| 64 | 69 | ||
| 65 | static bool is_namespace_pmem(struct device *dev) | 70 | static bool is_namespace_pmem(const struct device *dev) |
| 66 | { | 71 | { |
| 67 | return dev ? dev->type == &namespace_pmem_device_type : false; | 72 | return dev ? dev->type == &namespace_pmem_device_type : false; |
| 68 | } | 73 | } |
| 69 | 74 | ||
| 70 | static bool is_namespace_blk(struct device *dev) | 75 | static bool is_namespace_blk(const struct device *dev) |
| 71 | { | 76 | { |
| 72 | return dev ? dev->type == &namespace_blk_device_type : false; | 77 | return dev ? dev->type == &namespace_blk_device_type : false; |
| 73 | } | 78 | } |
| 74 | 79 | ||
| 75 | static bool is_namespace_io(struct device *dev) | 80 | static bool is_namespace_io(const struct device *dev) |
| 76 | { | 81 | { |
| 77 | return dev ? dev->type == &namespace_io_device_type : false; | 82 | return dev ? dev->type == &namespace_io_device_type : false; |
| 78 | } | 83 | } |
| @@ -168,7 +173,21 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, | |||
| 168 | suffix = "s"; | 173 | suffix = "s"; |
| 169 | 174 | ||
| 170 | if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { | 175 | if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { |
| 171 | sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : ""); | 176 | int nsidx = 0; |
| 177 | |||
| 178 | if (is_namespace_pmem(&ndns->dev)) { | ||
| 179 | struct nd_namespace_pmem *nspm; | ||
| 180 | |||
| 181 | nspm = to_nd_namespace_pmem(&ndns->dev); | ||
| 182 | nsidx = nspm->id; | ||
| 183 | } | ||
| 184 | |||
| 185 | if (nsidx) | ||
| 186 | sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx, | ||
| 187 | suffix ? suffix : ""); | ||
| 188 | else | ||
| 189 | sprintf(name, "pmem%d%s", nd_region->id, | ||
| 190 | suffix ? suffix : ""); | ||
| 172 | } else if (is_namespace_blk(&ndns->dev)) { | 191 | } else if (is_namespace_blk(&ndns->dev)) { |
| 173 | struct nd_namespace_blk *nsblk; | 192 | struct nd_namespace_blk *nsblk; |
| 174 | 193 | ||
| @@ -294,7 +313,7 @@ static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) | |||
| 294 | if (strcmp(res->name, label_id.id) != 0) | 313 | if (strcmp(res->name, label_id.id) != 0) |
| 295 | continue; | 314 | continue; |
| 296 | /* | 315 | /* |
| 297 | * Resources with unacknoweldged adjustments indicate a | 316 | * Resources with unacknowledged adjustments indicate a |
| 298 | * failure to update labels | 317 | * failure to update labels |
| 299 | */ | 318 | */ |
| 300 | if (res->flags & DPA_RESOURCE_ADJUSTED) | 319 | if (res->flags & DPA_RESOURCE_ADJUSTED) |
| @@ -510,19 +529,68 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id, | |||
| 510 | return rc ? n : 0; | 529 | return rc ? n : 0; |
| 511 | } | 530 | } |
| 512 | 531 | ||
| 513 | static bool space_valid(bool is_pmem, bool is_reserve, | 532 | |
| 514 | struct nd_label_id *label_id, struct resource *res) | 533 | /** |
| 534 | * space_valid() - validate free dpa space against constraints | ||
| 535 | * @nd_region: hosting region of the free space | ||
| 536 | * @ndd: dimm device data for debug | ||
| 537 | * @label_id: namespace id to allocate space | ||
| 538 | * @prev: potential allocation that precedes free space | ||
| 539 | * @next: allocation that follows the given free space range | ||
| 540 | * @exist: first allocation with same id in the mapping | ||
| 541 | * @n: range that must satisfied for pmem allocations | ||
| 542 | * @valid: free space range to validate | ||
| 543 | * | ||
| 544 | * BLK-space is valid as long as it does not precede a PMEM | ||
| 545 | * allocation in a given region. PMEM-space must be contiguous | ||
| 546 | * and adjacent to an existing existing allocation (if one | ||
| 547 | * exists). If reserving PMEM any space is valid. | ||
| 548 | */ | ||
| 549 | static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, | ||
| 550 | struct nd_label_id *label_id, struct resource *prev, | ||
| 551 | struct resource *next, struct resource *exist, | ||
| 552 | resource_size_t n, struct resource *valid) | ||
| 515 | { | 553 | { |
| 516 | /* | 554 | bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; |
| 517 | * For BLK-space any space is valid, for PMEM-space, it must be | 555 | bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; |
| 518 | * contiguous with an existing allocation unless we are | 556 | |
| 519 | * reserving pmem. | 557 | if (valid->start >= valid->end) |
| 520 | */ | 558 | goto invalid; |
| 521 | if (is_reserve || !is_pmem) | 559 | |
| 522 | return true; | 560 | if (is_reserve) |
| 523 | if (!res || strcmp(res->name, label_id->id) == 0) | 561 | return; |
| 524 | return true; | 562 | |
| 525 | return false; | 563 | if (!is_pmem) { |
| 564 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
| 565 | struct nvdimm_bus *nvdimm_bus; | ||
| 566 | struct blk_alloc_info info = { | ||
| 567 | .nd_mapping = nd_mapping, | ||
| 568 | .available = nd_mapping->size, | ||
| 569 | .res = valid, | ||
| 570 | }; | ||
| 571 | |||
| 572 | WARN_ON(!is_nd_blk(&nd_region->dev)); | ||
| 573 | nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); | ||
| 574 | device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); | ||
| 575 | return; | ||
| 576 | } | ||
| 577 | |||
| 578 | /* allocation needs to be contiguous, so this is all or nothing */ | ||
| 579 | if (resource_size(valid) < n) | ||
| 580 | goto invalid; | ||
| 581 | |||
| 582 | /* we've got all the space we need and no existing allocation */ | ||
| 583 | if (!exist) | ||
| 584 | return; | ||
| 585 | |||
| 586 | /* allocation needs to be contiguous with the existing namespace */ | ||
| 587 | if (valid->start == exist->end + 1 | ||
| 588 | || valid->end == exist->start - 1) | ||
| 589 | return; | ||
| 590 | |||
| 591 | invalid: | ||
| 592 | /* truncate @valid size to 0 */ | ||
| 593 | valid->end = valid->start - 1; | ||
| 526 | } | 594 | } |
| 527 | 595 | ||
| 528 | enum alloc_loc { | 596 | enum alloc_loc { |
| @@ -534,18 +602,24 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
| 534 | resource_size_t n) | 602 | resource_size_t n) |
| 535 | { | 603 | { |
| 536 | resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; | 604 | resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; |
| 537 | bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; | ||
| 538 | bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; | 605 | bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; |
| 539 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 606 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 607 | struct resource *res, *exist = NULL, valid; | ||
| 540 | const resource_size_t to_allocate = n; | 608 | const resource_size_t to_allocate = n; |
| 541 | struct resource *res; | ||
| 542 | int first; | 609 | int first; |
| 543 | 610 | ||
| 611 | for_each_dpa_resource(ndd, res) | ||
| 612 | if (strcmp(label_id->id, res->name) == 0) | ||
| 613 | exist = res; | ||
| 614 | |||
| 615 | valid.start = nd_mapping->start; | ||
| 616 | valid.end = mapping_end; | ||
| 617 | valid.name = "free space"; | ||
| 544 | retry: | 618 | retry: |
| 545 | first = 0; | 619 | first = 0; |
| 546 | for_each_dpa_resource(ndd, res) { | 620 | for_each_dpa_resource(ndd, res) { |
| 547 | resource_size_t allocate, available = 0, free_start, free_end; | ||
| 548 | struct resource *next = res->sibling, *new_res = NULL; | 621 | struct resource *next = res->sibling, *new_res = NULL; |
| 622 | resource_size_t allocate, available = 0; | ||
| 549 | enum alloc_loc loc = ALLOC_ERR; | 623 | enum alloc_loc loc = ALLOC_ERR; |
| 550 | const char *action; | 624 | const char *action; |
| 551 | int rc = 0; | 625 | int rc = 0; |
| @@ -558,32 +632,35 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
| 558 | 632 | ||
| 559 | /* space at the beginning of the mapping */ | 633 | /* space at the beginning of the mapping */ |
| 560 | if (!first++ && res->start > nd_mapping->start) { | 634 | if (!first++ && res->start > nd_mapping->start) { |
| 561 | free_start = nd_mapping->start; | 635 | valid.start = nd_mapping->start; |
| 562 | available = res->start - free_start; | 636 | valid.end = res->start - 1; |
| 563 | if (space_valid(is_pmem, is_reserve, label_id, NULL)) | 637 | space_valid(nd_region, ndd, label_id, NULL, next, exist, |
| 638 | to_allocate, &valid); | ||
| 639 | available = resource_size(&valid); | ||
| 640 | if (available) | ||
| 564 | loc = ALLOC_BEFORE; | 641 | loc = ALLOC_BEFORE; |
| 565 | } | 642 | } |
| 566 | 643 | ||
| 567 | /* space between allocations */ | 644 | /* space between allocations */ |
| 568 | if (!loc && next) { | 645 | if (!loc && next) { |
| 569 | free_start = res->start + resource_size(res); | 646 | valid.start = res->start + resource_size(res); |
| 570 | free_end = min(mapping_end, next->start - 1); | 647 | valid.end = min(mapping_end, next->start - 1); |
| 571 | if (space_valid(is_pmem, is_reserve, label_id, res) | 648 | space_valid(nd_region, ndd, label_id, res, next, exist, |
| 572 | && free_start < free_end) { | 649 | to_allocate, &valid); |
| 573 | available = free_end + 1 - free_start; | 650 | available = resource_size(&valid); |
| 651 | if (available) | ||
| 574 | loc = ALLOC_MID; | 652 | loc = ALLOC_MID; |
| 575 | } | ||
| 576 | } | 653 | } |
| 577 | 654 | ||
| 578 | /* space at the end of the mapping */ | 655 | /* space at the end of the mapping */ |
| 579 | if (!loc && !next) { | 656 | if (!loc && !next) { |
| 580 | free_start = res->start + resource_size(res); | 657 | valid.start = res->start + resource_size(res); |
| 581 | free_end = mapping_end; | 658 | valid.end = mapping_end; |
| 582 | if (space_valid(is_pmem, is_reserve, label_id, res) | 659 | space_valid(nd_region, ndd, label_id, res, next, exist, |
| 583 | && free_start < free_end) { | 660 | to_allocate, &valid); |
| 584 | available = free_end + 1 - free_start; | 661 | available = resource_size(&valid); |
| 662 | if (available) | ||
| 585 | loc = ALLOC_AFTER; | 663 | loc = ALLOC_AFTER; |
| 586 | } | ||
| 587 | } | 664 | } |
| 588 | 665 | ||
| 589 | if (!loc || !available) | 666 | if (!loc || !available) |
| @@ -593,8 +670,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
| 593 | case ALLOC_BEFORE: | 670 | case ALLOC_BEFORE: |
| 594 | if (strcmp(res->name, label_id->id) == 0) { | 671 | if (strcmp(res->name, label_id->id) == 0) { |
| 595 | /* adjust current resource up */ | 672 | /* adjust current resource up */ |
| 596 | if (is_pmem && !is_reserve) | ||
| 597 | return n; | ||
| 598 | rc = adjust_resource(res, res->start - allocate, | 673 | rc = adjust_resource(res, res->start - allocate, |
| 599 | resource_size(res) + allocate); | 674 | resource_size(res) + allocate); |
| 600 | action = "cur grow up"; | 675 | action = "cur grow up"; |
| @@ -604,8 +679,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
| 604 | case ALLOC_MID: | 679 | case ALLOC_MID: |
| 605 | if (strcmp(next->name, label_id->id) == 0) { | 680 | if (strcmp(next->name, label_id->id) == 0) { |
| 606 | /* adjust next resource up */ | 681 | /* adjust next resource up */ |
| 607 | if (is_pmem && !is_reserve) | ||
| 608 | return n; | ||
| 609 | rc = adjust_resource(next, next->start | 682 | rc = adjust_resource(next, next->start |
| 610 | - allocate, resource_size(next) | 683 | - allocate, resource_size(next) |
| 611 | + allocate); | 684 | + allocate); |
| @@ -629,12 +702,10 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
| 629 | if (strcmp(action, "allocate") == 0) { | 702 | if (strcmp(action, "allocate") == 0) { |
| 630 | /* BLK allocate bottom up */ | 703 | /* BLK allocate bottom up */ |
| 631 | if (!is_pmem) | 704 | if (!is_pmem) |
| 632 | free_start += available - allocate; | 705 | valid.start += available - allocate; |
| 633 | else if (!is_reserve && free_start != nd_mapping->start) | ||
| 634 | return n; | ||
| 635 | 706 | ||
| 636 | new_res = nvdimm_allocate_dpa(ndd, label_id, | 707 | new_res = nvdimm_allocate_dpa(ndd, label_id, |
| 637 | free_start, allocate); | 708 | valid.start, allocate); |
| 638 | if (!new_res) | 709 | if (!new_res) |
| 639 | rc = -EBUSY; | 710 | rc = -EBUSY; |
| 640 | } else if (strcmp(action, "grow down") == 0) { | 711 | } else if (strcmp(action, "grow down") == 0) { |
| @@ -832,13 +903,45 @@ static int grow_dpa_allocation(struct nd_region *nd_region, | |||
| 832 | return 0; | 903 | return 0; |
| 833 | } | 904 | } |
| 834 | 905 | ||
| 835 | static void nd_namespace_pmem_set_size(struct nd_region *nd_region, | 906 | static void nd_namespace_pmem_set_resource(struct nd_region *nd_region, |
| 836 | struct nd_namespace_pmem *nspm, resource_size_t size) | 907 | struct nd_namespace_pmem *nspm, resource_size_t size) |
| 837 | { | 908 | { |
| 838 | struct resource *res = &nspm->nsio.res; | 909 | struct resource *res = &nspm->nsio.res; |
| 910 | resource_size_t offset = 0; | ||
| 839 | 911 | ||
| 840 | res->start = nd_region->ndr_start; | 912 | if (size && !nspm->uuid) { |
| 841 | res->end = nd_region->ndr_start + size - 1; | 913 | WARN_ON_ONCE(1); |
| 914 | size = 0; | ||
| 915 | } | ||
| 916 | |||
| 917 | if (size && nspm->uuid) { | ||
| 918 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
| 919 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | ||
| 920 | struct nd_label_id label_id; | ||
| 921 | struct resource *res; | ||
| 922 | |||
| 923 | if (!ndd) { | ||
| 924 | size = 0; | ||
| 925 | goto out; | ||
| 926 | } | ||
| 927 | |||
| 928 | nd_label_gen_id(&label_id, nspm->uuid, 0); | ||
| 929 | |||
| 930 | /* calculate a spa offset from the dpa allocation offset */ | ||
| 931 | for_each_dpa_resource(ndd, res) | ||
| 932 | if (strcmp(res->name, label_id.id) == 0) { | ||
| 933 | offset = (res->start - nd_mapping->start) | ||
| 934 | * nd_region->ndr_mappings; | ||
| 935 | goto out; | ||
| 936 | } | ||
| 937 | |||
| 938 | WARN_ON_ONCE(1); | ||
| 939 | size = 0; | ||
| 940 | } | ||
| 941 | |||
| 942 | out: | ||
| 943 | res->start = nd_region->ndr_start + offset; | ||
| 944 | res->end = res->start + size - 1; | ||
| 842 | } | 945 | } |
| 843 | 946 | ||
| 844 | static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where) | 947 | static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where) |
| @@ -929,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) | |||
| 929 | if (is_namespace_pmem(dev)) { | 1032 | if (is_namespace_pmem(dev)) { |
| 930 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); | 1033 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); |
| 931 | 1034 | ||
| 932 | nd_namespace_pmem_set_size(nd_region, nspm, | 1035 | nd_namespace_pmem_set_resource(nd_region, nspm, |
| 933 | val * nd_region->ndr_mappings); | 1036 | val * nd_region->ndr_mappings); |
| 934 | } else if (is_namespace_blk(dev)) { | 1037 | } else if (is_namespace_blk(dev)) { |
| 935 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); | 1038 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
| @@ -1031,22 +1134,27 @@ static ssize_t size_show(struct device *dev, | |||
| 1031 | } | 1134 | } |
| 1032 | static DEVICE_ATTR(size, S_IRUGO, size_show, size_store); | 1135 | static DEVICE_ATTR(size, S_IRUGO, size_show, size_store); |
| 1033 | 1136 | ||
| 1034 | static ssize_t uuid_show(struct device *dev, | 1137 | static u8 *namespace_to_uuid(struct device *dev) |
| 1035 | struct device_attribute *attr, char *buf) | ||
| 1036 | { | 1138 | { |
| 1037 | u8 *uuid; | ||
| 1038 | |||
| 1039 | if (is_namespace_pmem(dev)) { | 1139 | if (is_namespace_pmem(dev)) { |
| 1040 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); | 1140 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); |
| 1041 | 1141 | ||
| 1042 | uuid = nspm->uuid; | 1142 | return nspm->uuid; |
| 1043 | } else if (is_namespace_blk(dev)) { | 1143 | } else if (is_namespace_blk(dev)) { |
| 1044 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); | 1144 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
| 1045 | 1145 | ||
| 1046 | uuid = nsblk->uuid; | 1146 | return nsblk->uuid; |
| 1047 | } else | 1147 | } else |
| 1048 | return -ENXIO; | 1148 | return ERR_PTR(-ENXIO); |
| 1149 | } | ||
| 1049 | 1150 | ||
| 1151 | static ssize_t uuid_show(struct device *dev, | ||
| 1152 | struct device_attribute *attr, char *buf) | ||
| 1153 | { | ||
| 1154 | u8 *uuid = namespace_to_uuid(dev); | ||
| 1155 | |||
| 1156 | if (IS_ERR(uuid)) | ||
| 1157 | return PTR_ERR(uuid); | ||
| 1050 | if (uuid) | 1158 | if (uuid) |
| 1051 | return sprintf(buf, "%pUb\n", uuid); | 1159 | return sprintf(buf, "%pUb\n", uuid); |
| 1052 | return sprintf(buf, "\n"); | 1160 | return sprintf(buf, "\n"); |
| @@ -1089,7 +1197,7 @@ static int namespace_update_uuid(struct nd_region *nd_region, | |||
| 1089 | * | 1197 | * |
| 1090 | * FIXME: can we delete uuid with zero dpa allocated? | 1198 | * FIXME: can we delete uuid with zero dpa allocated? |
| 1091 | */ | 1199 | */ |
| 1092 | if (nd_mapping->labels) | 1200 | if (list_empty(&nd_mapping->labels)) |
| 1093 | return -EBUSY; | 1201 | return -EBUSY; |
| 1094 | } | 1202 | } |
| 1095 | 1203 | ||
| @@ -1491,14 +1599,19 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, | |||
| 1491 | 1599 | ||
| 1492 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 1600 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
| 1493 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 1601 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
| 1494 | struct nd_namespace_label *nd_label; | 1602 | struct nd_label_ent *label_ent; |
| 1495 | bool found_uuid = false; | 1603 | bool found_uuid = false; |
| 1496 | int l; | ||
| 1497 | 1604 | ||
| 1498 | for_each_label(l, nd_label, nd_mapping->labels) { | 1605 | list_for_each_entry(label_ent, &nd_mapping->labels, list) { |
| 1499 | u64 isetcookie = __le64_to_cpu(nd_label->isetcookie); | 1606 | struct nd_namespace_label *nd_label = label_ent->label; |
| 1500 | u16 position = __le16_to_cpu(nd_label->position); | 1607 | u16 position, nlabel; |
| 1501 | u16 nlabel = __le16_to_cpu(nd_label->nlabel); | 1608 | u64 isetcookie; |
| 1609 | |||
| 1610 | if (!nd_label) | ||
| 1611 | continue; | ||
| 1612 | isetcookie = __le64_to_cpu(nd_label->isetcookie); | ||
| 1613 | position = __le16_to_cpu(nd_label->position); | ||
| 1614 | nlabel = __le16_to_cpu(nd_label->nlabel); | ||
| 1502 | 1615 | ||
| 1503 | if (isetcookie != cookie) | 1616 | if (isetcookie != cookie) |
| 1504 | continue; | 1617 | continue; |
| @@ -1528,7 +1641,6 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, | |||
| 1528 | 1641 | ||
| 1529 | static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) | 1642 | static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) |
| 1530 | { | 1643 | { |
| 1531 | struct nd_namespace_label *select = NULL; | ||
| 1532 | int i; | 1644 | int i; |
| 1533 | 1645 | ||
| 1534 | if (!pmem_id) | 1646 | if (!pmem_id) |
| @@ -1536,90 +1648,106 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) | |||
| 1536 | 1648 | ||
| 1537 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 1649 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
| 1538 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 1650 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
| 1539 | struct nd_namespace_label *nd_label; | 1651 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 1652 | struct nd_namespace_label *nd_label = NULL; | ||
| 1540 | u64 hw_start, hw_end, pmem_start, pmem_end; | 1653 | u64 hw_start, hw_end, pmem_start, pmem_end; |
| 1541 | int l; | 1654 | struct nd_label_ent *label_ent; |
| 1542 | 1655 | ||
| 1543 | for_each_label(l, nd_label, nd_mapping->labels) | 1656 | WARN_ON(!mutex_is_locked(&nd_mapping->lock)); |
| 1657 | list_for_each_entry(label_ent, &nd_mapping->labels, list) { | ||
| 1658 | nd_label = label_ent->label; | ||
| 1659 | if (!nd_label) | ||
| 1660 | continue; | ||
| 1544 | if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0) | 1661 | if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0) |
| 1545 | break; | 1662 | break; |
| 1663 | nd_label = NULL; | ||
| 1664 | } | ||
| 1546 | 1665 | ||
| 1547 | if (!nd_label) { | 1666 | if (!nd_label) { |
| 1548 | WARN_ON(1); | 1667 | WARN_ON(1); |
| 1549 | return -EINVAL; | 1668 | return -EINVAL; |
| 1550 | } | 1669 | } |
| 1551 | 1670 | ||
| 1552 | select = nd_label; | ||
| 1553 | /* | 1671 | /* |
| 1554 | * Check that this label is compliant with the dpa | 1672 | * Check that this label is compliant with the dpa |
| 1555 | * range published in NFIT | 1673 | * range published in NFIT |
| 1556 | */ | 1674 | */ |
| 1557 | hw_start = nd_mapping->start; | 1675 | hw_start = nd_mapping->start; |
| 1558 | hw_end = hw_start + nd_mapping->size; | 1676 | hw_end = hw_start + nd_mapping->size; |
| 1559 | pmem_start = __le64_to_cpu(select->dpa); | 1677 | pmem_start = __le64_to_cpu(nd_label->dpa); |
| 1560 | pmem_end = pmem_start + __le64_to_cpu(select->rawsize); | 1678 | pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize); |
| 1561 | if (pmem_start == hw_start && pmem_end <= hw_end) | 1679 | if (pmem_start >= hw_start && pmem_start < hw_end |
| 1680 | && pmem_end <= hw_end && pmem_end > hw_start) | ||
| 1562 | /* pass */; | 1681 | /* pass */; |
| 1563 | else | 1682 | else { |
| 1683 | dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n", | ||
| 1684 | dev_name(ndd->dev), nd_label->uuid); | ||
| 1564 | return -EINVAL; | 1685 | return -EINVAL; |
| 1686 | } | ||
| 1565 | 1687 | ||
| 1566 | nd_mapping->labels[0] = select; | 1688 | /* move recently validated label to the front of the list */ |
| 1567 | nd_mapping->labels[1] = NULL; | 1689 | list_move(&label_ent->list, &nd_mapping->labels); |
| 1568 | } | 1690 | } |
| 1569 | return 0; | 1691 | return 0; |
| 1570 | } | 1692 | } |
| 1571 | 1693 | ||
| 1572 | /** | 1694 | /** |
| 1573 | * find_pmem_label_set - validate interleave set labelling, retrieve label0 | 1695 | * create_namespace_pmem - validate interleave set labelling, retrieve label0 |
| 1574 | * @nd_region: region with mappings to validate | 1696 | * @nd_region: region with mappings to validate |
| 1697 | * @nspm: target namespace to create | ||
| 1698 | * @nd_label: target pmem namespace label to evaluate | ||
| 1575 | */ | 1699 | */ |
| 1576 | static int find_pmem_label_set(struct nd_region *nd_region, | 1700 | struct device *create_namespace_pmem(struct nd_region *nd_region, |
| 1577 | struct nd_namespace_pmem *nspm) | 1701 | struct nd_namespace_label *nd_label) |
| 1578 | { | 1702 | { |
| 1579 | u64 cookie = nd_region_interleave_set_cookie(nd_region); | 1703 | u64 cookie = nd_region_interleave_set_cookie(nd_region); |
| 1580 | struct nd_namespace_label *nd_label; | 1704 | struct nd_label_ent *label_ent; |
| 1581 | u8 select_id[NSLABEL_UUID_LEN]; | 1705 | struct nd_namespace_pmem *nspm; |
| 1706 | struct nd_mapping *nd_mapping; | ||
| 1582 | resource_size_t size = 0; | 1707 | resource_size_t size = 0; |
| 1583 | u8 *pmem_id = NULL; | 1708 | struct resource *res; |
| 1584 | int rc = -ENODEV, l; | 1709 | struct device *dev; |
| 1710 | int rc = 0; | ||
| 1585 | u16 i; | 1711 | u16 i; |
| 1586 | 1712 | ||
| 1587 | if (cookie == 0) | 1713 | if (cookie == 0) { |
| 1588 | return -ENXIO; | 1714 | dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n"); |
| 1715 | return ERR_PTR(-ENXIO); | ||
| 1716 | } | ||
| 1589 | 1717 | ||
| 1590 | /* | 1718 | if (__le64_to_cpu(nd_label->isetcookie) != cookie) { |
| 1591 | * Find a complete set of labels by uuid. By definition we can start | 1719 | dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", |
| 1592 | * with any mapping as the reference label | 1720 | nd_label->uuid); |
| 1593 | */ | 1721 | return ERR_PTR(-EAGAIN); |
| 1594 | for_each_label(l, nd_label, nd_region->mapping[0].labels) { | 1722 | } |
| 1595 | u64 isetcookie = __le64_to_cpu(nd_label->isetcookie); | ||
| 1596 | 1723 | ||
| 1597 | if (isetcookie != cookie) | 1724 | nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); |
| 1598 | continue; | 1725 | if (!nspm) |
| 1726 | return ERR_PTR(-ENOMEM); | ||
| 1599 | 1727 | ||
| 1600 | for (i = 0; nd_region->ndr_mappings; i++) | 1728 | nspm->id = -1; |
| 1601 | if (!has_uuid_at_pos(nd_region, nd_label->uuid, | 1729 | dev = &nspm->nsio.common.dev; |
| 1602 | cookie, i)) | 1730 | dev->type = &namespace_pmem_device_type; |
| 1603 | break; | 1731 | dev->parent = &nd_region->dev; |
| 1604 | if (i < nd_region->ndr_mappings) { | 1732 | res = &nspm->nsio.res; |
| 1605 | /* | 1733 | res->name = dev_name(&nd_region->dev); |
| 1606 | * Give up if we don't find an instance of a | 1734 | res->flags = IORESOURCE_MEM; |
| 1607 | * uuid at each position (from 0 to | 1735 | |
| 1608 | * nd_region->ndr_mappings - 1), or if we find a | 1736 | for (i = 0; i < nd_region->ndr_mappings; i++) |
| 1609 | * dimm with two instances of the same uuid. | 1737 | if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i)) |
| 1610 | */ | 1738 | break; |
| 1611 | rc = -EINVAL; | 1739 | if (i < nd_region->ndr_mappings) { |
| 1612 | goto err; | 1740 | struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); |
| 1613 | } else if (pmem_id) { | 1741 | |
| 1614 | /* | 1742 | /* |
| 1615 | * If there is more than one valid uuid set, we | 1743 | * Give up if we don't find an instance of a uuid at each |
| 1616 | * need userspace to clean this up. | 1744 | * position (from 0 to nd_region->ndr_mappings - 1), or if we |
| 1617 | */ | 1745 | * find a dimm with two instances of the same uuid. |
| 1618 | rc = -EBUSY; | 1746 | */ |
| 1619 | goto err; | 1747 | dev_err(&nd_region->dev, "%s missing label for %pUb\n", |
| 1620 | } | 1748 | dev_name(ndd->dev), nd_label->uuid); |
| 1621 | memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN); | 1749 | rc = -EINVAL; |
| 1622 | pmem_id = select_id; | 1750 | goto err; |
| 1623 | } | 1751 | } |
| 1624 | 1752 | ||
| 1625 | /* | 1753 | /* |
| @@ -1630,14 +1758,23 @@ static int find_pmem_label_set(struct nd_region *nd_region, | |||
| 1630 | * the dimm being enabled (i.e. nd_label_reserve_dpa() | 1758 | * the dimm being enabled (i.e. nd_label_reserve_dpa() |
| 1631 | * succeeded). | 1759 | * succeeded). |
| 1632 | */ | 1760 | */ |
| 1633 | rc = select_pmem_id(nd_region, pmem_id); | 1761 | rc = select_pmem_id(nd_region, nd_label->uuid); |
| 1634 | if (rc) | 1762 | if (rc) |
| 1635 | goto err; | 1763 | goto err; |
| 1636 | 1764 | ||
| 1637 | /* Calculate total size and populate namespace properties from label0 */ | 1765 | /* Calculate total size and populate namespace properties from label0 */ |
| 1638 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 1766 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
| 1639 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 1767 | struct nd_namespace_label *label0; |
| 1640 | struct nd_namespace_label *label0 = nd_mapping->labels[0]; | 1768 | |
| 1769 | nd_mapping = &nd_region->mapping[i]; | ||
| 1770 | label_ent = list_first_entry_or_null(&nd_mapping->labels, | ||
| 1771 | typeof(*label_ent), list); | ||
| 1772 | label0 = label_ent ? label_ent->label : 0; | ||
| 1773 | |||
| 1774 | if (!label0) { | ||
| 1775 | WARN_ON(1); | ||
| 1776 | continue; | ||
| 1777 | } | ||
| 1641 | 1778 | ||
| 1642 | size += __le64_to_cpu(label0->rawsize); | 1779 | size += __le64_to_cpu(label0->rawsize); |
| 1643 | if (__le16_to_cpu(label0->position) != 0) | 1780 | if (__le16_to_cpu(label0->position) != 0) |
| @@ -1654,10 +1791,11 @@ static int find_pmem_label_set(struct nd_region *nd_region, | |||
| 1654 | goto err; | 1791 | goto err; |
| 1655 | } | 1792 | } |
| 1656 | 1793 | ||
| 1657 | nd_namespace_pmem_set_size(nd_region, nspm, size); | 1794 | nd_namespace_pmem_set_resource(nd_region, nspm, size); |
| 1658 | 1795 | ||
| 1659 | return 0; | 1796 | return dev; |
| 1660 | err: | 1797 | err: |
| 1798 | namespace_pmem_release(dev); | ||
| 1661 | switch (rc) { | 1799 | switch (rc) { |
| 1662 | case -EINVAL: | 1800 | case -EINVAL: |
| 1663 | dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__); | 1801 | dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__); |
| @@ -1670,55 +1808,7 @@ static int find_pmem_label_set(struct nd_region *nd_region, | |||
| 1670 | __func__, rc); | 1808 | __func__, rc); |
| 1671 | break; | 1809 | break; |
| 1672 | } | 1810 | } |
| 1673 | return rc; | 1811 | return ERR_PTR(rc); |
| 1674 | } | ||
| 1675 | |||
| 1676 | static struct device **create_namespace_pmem(struct nd_region *nd_region) | ||
| 1677 | { | ||
| 1678 | struct nd_namespace_pmem *nspm; | ||
| 1679 | struct device *dev, **devs; | ||
| 1680 | struct resource *res; | ||
| 1681 | int rc; | ||
| 1682 | |||
| 1683 | nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); | ||
| 1684 | if (!nspm) | ||
| 1685 | return NULL; | ||
| 1686 | |||
| 1687 | dev = &nspm->nsio.common.dev; | ||
| 1688 | dev->type = &namespace_pmem_device_type; | ||
| 1689 | dev->parent = &nd_region->dev; | ||
| 1690 | res = &nspm->nsio.res; | ||
| 1691 | res->name = dev_name(&nd_region->dev); | ||
| 1692 | res->flags = IORESOURCE_MEM; | ||
| 1693 | rc = find_pmem_label_set(nd_region, nspm); | ||
| 1694 | if (rc == -ENODEV) { | ||
| 1695 | int i; | ||
| 1696 | |||
| 1697 | /* Pass, try to permit namespace creation... */ | ||
| 1698 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 1699 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | ||
| 1700 | |||
| 1701 | kfree(nd_mapping->labels); | ||
| 1702 | nd_mapping->labels = NULL; | ||
| 1703 | } | ||
| 1704 | |||
| 1705 | /* Publish a zero-sized namespace for userspace to configure. */ | ||
| 1706 | nd_namespace_pmem_set_size(nd_region, nspm, 0); | ||
| 1707 | |||
| 1708 | rc = 0; | ||
| 1709 | } else if (rc) | ||
| 1710 | goto err; | ||
| 1711 | |||
| 1712 | devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL); | ||
| 1713 | if (!devs) | ||
| 1714 | goto err; | ||
| 1715 | |||
| 1716 | devs[0] = dev; | ||
| 1717 | return devs; | ||
| 1718 | |||
| 1719 | err: | ||
| 1720 | namespace_pmem_release(&nspm->nsio.common.dev); | ||
| 1721 | return NULL; | ||
| 1722 | } | 1812 | } |
| 1723 | 1813 | ||
| 1724 | struct resource *nsblk_add_resource(struct nd_region *nd_region, | 1814 | struct resource *nsblk_add_resource(struct nd_region *nd_region, |
| @@ -1770,16 +1860,58 @@ static struct device *nd_namespace_blk_create(struct nd_region *nd_region) | |||
| 1770 | return &nsblk->common.dev; | 1860 | return &nsblk->common.dev; |
| 1771 | } | 1861 | } |
| 1772 | 1862 | ||
| 1773 | void nd_region_create_blk_seed(struct nd_region *nd_region) | 1863 | static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) |
| 1864 | { | ||
| 1865 | struct nd_namespace_pmem *nspm; | ||
| 1866 | struct resource *res; | ||
| 1867 | struct device *dev; | ||
| 1868 | |||
| 1869 | if (!is_nd_pmem(&nd_region->dev)) | ||
| 1870 | return NULL; | ||
| 1871 | |||
| 1872 | nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); | ||
| 1873 | if (!nspm) | ||
| 1874 | return NULL; | ||
| 1875 | |||
| 1876 | dev = &nspm->nsio.common.dev; | ||
| 1877 | dev->type = &namespace_pmem_device_type; | ||
| 1878 | dev->parent = &nd_region->dev; | ||
| 1879 | res = &nspm->nsio.res; | ||
| 1880 | res->name = dev_name(&nd_region->dev); | ||
| 1881 | res->flags = IORESOURCE_MEM; | ||
| 1882 | |||
| 1883 | nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); | ||
| 1884 | if (nspm->id < 0) { | ||
| 1885 | kfree(nspm); | ||
| 1886 | return NULL; | ||
| 1887 | } | ||
| 1888 | dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id); | ||
| 1889 | dev->parent = &nd_region->dev; | ||
| 1890 | dev->groups = nd_namespace_attribute_groups; | ||
| 1891 | nd_namespace_pmem_set_resource(nd_region, nspm, 0); | ||
| 1892 | |||
| 1893 | return dev; | ||
| 1894 | } | ||
| 1895 | |||
| 1896 | void nd_region_create_ns_seed(struct nd_region *nd_region) | ||
| 1774 | { | 1897 | { |
| 1775 | WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); | 1898 | WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); |
| 1776 | nd_region->ns_seed = nd_namespace_blk_create(nd_region); | 1899 | |
| 1900 | if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO) | ||
| 1901 | return; | ||
| 1902 | |||
| 1903 | if (is_nd_blk(&nd_region->dev)) | ||
| 1904 | nd_region->ns_seed = nd_namespace_blk_create(nd_region); | ||
| 1905 | else | ||
| 1906 | nd_region->ns_seed = nd_namespace_pmem_create(nd_region); | ||
| 1907 | |||
| 1777 | /* | 1908 | /* |
| 1778 | * Seed creation failures are not fatal, provisioning is simply | 1909 | * Seed creation failures are not fatal, provisioning is simply |
| 1779 | * disabled until memory becomes available | 1910 | * disabled until memory becomes available |
| 1780 | */ | 1911 | */ |
| 1781 | if (!nd_region->ns_seed) | 1912 | if (!nd_region->ns_seed) |
| 1782 | dev_err(&nd_region->dev, "failed to create blk namespace\n"); | 1913 | dev_err(&nd_region->dev, "failed to create %s namespace\n", |
| 1914 | is_nd_blk(&nd_region->dev) ? "blk" : "pmem"); | ||
| 1783 | else | 1915 | else |
| 1784 | nd_device_register(nd_region->ns_seed); | 1916 | nd_device_register(nd_region->ns_seed); |
| 1785 | } | 1917 | } |
| @@ -1820,43 +1952,137 @@ void nd_region_create_btt_seed(struct nd_region *nd_region) | |||
| 1820 | dev_err(&nd_region->dev, "failed to create btt namespace\n"); | 1952 | dev_err(&nd_region->dev, "failed to create btt namespace\n"); |
| 1821 | } | 1953 | } |
| 1822 | 1954 | ||
| 1823 | static struct device **create_namespace_blk(struct nd_region *nd_region) | 1955 | static int add_namespace_resource(struct nd_region *nd_region, |
| 1956 | struct nd_namespace_label *nd_label, struct device **devs, | ||
| 1957 | int count) | ||
| 1824 | { | 1958 | { |
| 1825 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | 1959 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; |
| 1826 | struct nd_namespace_label *nd_label; | 1960 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 1827 | struct device *dev, **devs = NULL; | 1961 | int i; |
| 1962 | |||
| 1963 | for (i = 0; i < count; i++) { | ||
| 1964 | u8 *uuid = namespace_to_uuid(devs[i]); | ||
| 1965 | struct resource *res; | ||
| 1966 | |||
| 1967 | if (IS_ERR_OR_NULL(uuid)) { | ||
| 1968 | WARN_ON(1); | ||
| 1969 | continue; | ||
| 1970 | } | ||
| 1971 | |||
| 1972 | if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0) | ||
| 1973 | continue; | ||
| 1974 | if (is_namespace_blk(devs[i])) { | ||
| 1975 | res = nsblk_add_resource(nd_region, ndd, | ||
| 1976 | to_nd_namespace_blk(devs[i]), | ||
| 1977 | __le64_to_cpu(nd_label->dpa)); | ||
| 1978 | if (!res) | ||
| 1979 | return -ENXIO; | ||
| 1980 | nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count); | ||
| 1981 | } else { | ||
| 1982 | dev_err(&nd_region->dev, | ||
| 1983 | "error: conflicting extents for uuid: %pUb\n", | ||
| 1984 | nd_label->uuid); | ||
| 1985 | return -ENXIO; | ||
| 1986 | } | ||
| 1987 | break; | ||
| 1988 | } | ||
| 1989 | |||
| 1990 | return i; | ||
| 1991 | } | ||
| 1992 | |||
| 1993 | struct device *create_namespace_blk(struct nd_region *nd_region, | ||
| 1994 | struct nd_namespace_label *nd_label, int count) | ||
| 1995 | { | ||
| 1996 | |||
| 1997 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
| 1998 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | ||
| 1828 | struct nd_namespace_blk *nsblk; | 1999 | struct nd_namespace_blk *nsblk; |
| 1829 | struct nvdimm_drvdata *ndd; | 2000 | char *name[NSLABEL_NAME_LEN]; |
| 1830 | int i, l, count = 0; | 2001 | struct device *dev = NULL; |
| 1831 | struct resource *res; | 2002 | struct resource *res; |
| 1832 | 2003 | ||
| 1833 | if (nd_region->ndr_mappings == 0) | 2004 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); |
| 1834 | return NULL; | 2005 | if (!nsblk) |
| 2006 | return ERR_PTR(-ENOMEM); | ||
| 2007 | dev = &nsblk->common.dev; | ||
| 2008 | dev->type = &namespace_blk_device_type; | ||
| 2009 | dev->parent = &nd_region->dev; | ||
| 2010 | nsblk->id = -1; | ||
| 2011 | nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); | ||
| 2012 | nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, | ||
| 2013 | GFP_KERNEL); | ||
| 2014 | if (!nsblk->uuid) | ||
| 2015 | goto blk_err; | ||
| 2016 | memcpy(name, nd_label->name, NSLABEL_NAME_LEN); | ||
| 2017 | if (name[0]) | ||
| 2018 | nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, | ||
| 2019 | GFP_KERNEL); | ||
| 2020 | res = nsblk_add_resource(nd_region, ndd, nsblk, | ||
| 2021 | __le64_to_cpu(nd_label->dpa)); | ||
| 2022 | if (!res) | ||
| 2023 | goto blk_err; | ||
| 2024 | nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count); | ||
| 2025 | return dev; | ||
| 2026 | blk_err: | ||
| 2027 | namespace_blk_release(dev); | ||
| 2028 | return ERR_PTR(-ENXIO); | ||
| 2029 | } | ||
| 2030 | |||
| 2031 | static int cmp_dpa(const void *a, const void *b) | ||
| 2032 | { | ||
| 2033 | const struct device *dev_a = *(const struct device **) a; | ||
| 2034 | const struct device *dev_b = *(const struct device **) b; | ||
| 2035 | struct nd_namespace_blk *nsblk_a, *nsblk_b; | ||
| 2036 | struct nd_namespace_pmem *nspm_a, *nspm_b; | ||
| 2037 | |||
| 2038 | if (is_namespace_io(dev_a)) | ||
| 2039 | return 0; | ||
| 2040 | |||
| 2041 | if (is_namespace_blk(dev_a)) { | ||
| 2042 | nsblk_a = to_nd_namespace_blk(dev_a); | ||
| 2043 | nsblk_b = to_nd_namespace_blk(dev_b); | ||
| 2044 | |||
| 2045 | return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start, | ||
| 2046 | sizeof(resource_size_t)); | ||
| 2047 | } | ||
| 2048 | |||
| 2049 | nspm_a = to_nd_namespace_pmem(dev_a); | ||
| 2050 | nspm_b = to_nd_namespace_pmem(dev_b); | ||
| 2051 | |||
| 2052 | return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start, | ||
| 2053 | sizeof(resource_size_t)); | ||
| 2054 | } | ||
| 1835 | 2055 | ||
| 1836 | ndd = to_ndd(nd_mapping); | 2056 | static struct device **scan_labels(struct nd_region *nd_region) |
| 1837 | for_each_label(l, nd_label, nd_mapping->labels) { | 2057 | { |
| 1838 | u32 flags = __le32_to_cpu(nd_label->flags); | 2058 | int i, count = 0; |
| 1839 | char *name[NSLABEL_NAME_LEN]; | 2059 | struct device *dev, **devs = NULL; |
| 2060 | struct nd_label_ent *label_ent, *e; | ||
| 2061 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
| 2062 | resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; | ||
| 2063 | |||
| 2064 | /* "safe" because create_namespace_pmem() might list_move() label_ent */ | ||
| 2065 | list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { | ||
| 2066 | struct nd_namespace_label *nd_label = label_ent->label; | ||
| 1840 | struct device **__devs; | 2067 | struct device **__devs; |
| 2068 | u32 flags; | ||
| 1841 | 2069 | ||
| 1842 | if (flags & NSLABEL_FLAG_LOCAL) | 2070 | if (!nd_label) |
| 1843 | /* pass */; | 2071 | continue; |
| 2072 | flags = __le32_to_cpu(nd_label->flags); | ||
| 2073 | if (is_nd_blk(&nd_region->dev) | ||
| 2074 | == !!(flags & NSLABEL_FLAG_LOCAL)) | ||
| 2075 | /* pass, region matches label type */; | ||
| 1844 | else | 2076 | else |
| 1845 | continue; | 2077 | continue; |
| 1846 | 2078 | ||
| 1847 | for (i = 0; i < count; i++) { | 2079 | /* skip labels that describe extents outside of the region */ |
| 1848 | nsblk = to_nd_namespace_blk(devs[i]); | 2080 | if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end) |
| 1849 | if (memcmp(nsblk->uuid, nd_label->uuid, | 2081 | continue; |
| 1850 | NSLABEL_UUID_LEN) == 0) { | 2082 | |
| 1851 | res = nsblk_add_resource(nd_region, ndd, nsblk, | 2083 | i = add_namespace_resource(nd_region, nd_label, devs, count); |
| 1852 | __le64_to_cpu(nd_label->dpa)); | 2084 | if (i < 0) |
| 1853 | if (!res) | 2085 | goto err; |
| 1854 | goto err; | ||
| 1855 | nd_dbg_dpa(nd_region, ndd, res, "%s assign\n", | ||
| 1856 | dev_name(&nsblk->common.dev)); | ||
| 1857 | break; | ||
| 1858 | } | ||
| 1859 | } | ||
| 1860 | if (i < count) | 2086 | if (i < count) |
| 1861 | continue; | 2087 | continue; |
| 1862 | __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); | 2088 | __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); |
| @@ -1866,67 +2092,126 @@ static struct device **create_namespace_blk(struct nd_region *nd_region) | |||
| 1866 | kfree(devs); | 2092 | kfree(devs); |
| 1867 | devs = __devs; | 2093 | devs = __devs; |
| 1868 | 2094 | ||
| 1869 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); | 2095 | if (is_nd_blk(&nd_region->dev)) { |
| 1870 | if (!nsblk) | 2096 | dev = create_namespace_blk(nd_region, nd_label, count); |
| 1871 | goto err; | 2097 | if (IS_ERR(dev)) |
| 1872 | dev = &nsblk->common.dev; | 2098 | goto err; |
| 1873 | dev->type = &namespace_blk_device_type; | 2099 | devs[count++] = dev; |
| 1874 | dev->parent = &nd_region->dev; | 2100 | } else { |
| 1875 | dev_set_name(dev, "namespace%d.%d", nd_region->id, count); | 2101 | dev = create_namespace_pmem(nd_region, nd_label); |
| 1876 | devs[count++] = dev; | 2102 | if (IS_ERR(dev)) { |
| 1877 | nsblk->id = -1; | 2103 | switch (PTR_ERR(dev)) { |
| 1878 | nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); | 2104 | case -EAGAIN: |
| 1879 | nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, | 2105 | /* skip invalid labels */ |
| 1880 | GFP_KERNEL); | 2106 | continue; |
| 1881 | if (!nsblk->uuid) | 2107 | case -ENODEV: |
| 1882 | goto err; | 2108 | /* fallthrough to seed creation */ |
| 1883 | memcpy(name, nd_label->name, NSLABEL_NAME_LEN); | 2109 | break; |
| 1884 | if (name[0]) | 2110 | default: |
| 1885 | nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, | 2111 | goto err; |
| 1886 | GFP_KERNEL); | 2112 | } |
| 1887 | res = nsblk_add_resource(nd_region, ndd, nsblk, | 2113 | } else |
| 1888 | __le64_to_cpu(nd_label->dpa)); | 2114 | devs[count++] = dev; |
| 1889 | if (!res) | 2115 | } |
| 1890 | goto err; | ||
| 1891 | nd_dbg_dpa(nd_region, ndd, res, "%s assign\n", | ||
| 1892 | dev_name(&nsblk->common.dev)); | ||
| 1893 | } | 2116 | } |
| 1894 | 2117 | ||
| 1895 | dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n", | 2118 | dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n", |
| 1896 | __func__, count, count == 1 ? "" : "s"); | 2119 | __func__, count, is_nd_blk(&nd_region->dev) |
| 2120 | ? "blk" : "pmem", count == 1 ? "" : "s"); | ||
| 1897 | 2121 | ||
| 1898 | if (count == 0) { | 2122 | if (count == 0) { |
| 1899 | /* Publish a zero-sized namespace for userspace to configure. */ | 2123 | /* Publish a zero-sized namespace for userspace to configure. */ |
| 1900 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 2124 | nd_mapping_free_labels(nd_mapping); |
| 1901 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | ||
| 1902 | |||
| 1903 | kfree(nd_mapping->labels); | ||
| 1904 | nd_mapping->labels = NULL; | ||
| 1905 | } | ||
| 1906 | 2125 | ||
| 1907 | devs = kcalloc(2, sizeof(dev), GFP_KERNEL); | 2126 | devs = kcalloc(2, sizeof(dev), GFP_KERNEL); |
| 1908 | if (!devs) | 2127 | if (!devs) |
| 1909 | goto err; | 2128 | goto err; |
| 1910 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); | 2129 | if (is_nd_blk(&nd_region->dev)) { |
| 1911 | if (!nsblk) | 2130 | struct nd_namespace_blk *nsblk; |
| 1912 | goto err; | 2131 | |
| 1913 | dev = &nsblk->common.dev; | 2132 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); |
| 1914 | dev->type = &namespace_blk_device_type; | 2133 | if (!nsblk) |
| 2134 | goto err; | ||
| 2135 | dev = &nsblk->common.dev; | ||
| 2136 | dev->type = &namespace_blk_device_type; | ||
| 2137 | } else { | ||
| 2138 | struct nd_namespace_pmem *nspm; | ||
| 2139 | |||
| 2140 | nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); | ||
| 2141 | if (!nspm) | ||
| 2142 | goto err; | ||
| 2143 | dev = &nspm->nsio.common.dev; | ||
| 2144 | dev->type = &namespace_pmem_device_type; | ||
| 2145 | nd_namespace_pmem_set_resource(nd_region, nspm, 0); | ||
| 2146 | } | ||
| 1915 | dev->parent = &nd_region->dev; | 2147 | dev->parent = &nd_region->dev; |
| 1916 | devs[count++] = dev; | 2148 | devs[count++] = dev; |
| 2149 | } else if (is_nd_pmem(&nd_region->dev)) { | ||
| 2150 | /* clean unselected labels */ | ||
| 2151 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 2152 | struct list_head *l, *e; | ||
| 2153 | LIST_HEAD(list); | ||
| 2154 | int j; | ||
| 2155 | |||
| 2156 | nd_mapping = &nd_region->mapping[i]; | ||
| 2157 | if (list_empty(&nd_mapping->labels)) { | ||
| 2158 | WARN_ON(1); | ||
| 2159 | continue; | ||
| 2160 | } | ||
| 2161 | |||
| 2162 | j = count; | ||
| 2163 | list_for_each_safe(l, e, &nd_mapping->labels) { | ||
| 2164 | if (!j--) | ||
| 2165 | break; | ||
| 2166 | list_move_tail(l, &list); | ||
| 2167 | } | ||
| 2168 | nd_mapping_free_labels(nd_mapping); | ||
| 2169 | list_splice_init(&list, &nd_mapping->labels); | ||
| 2170 | } | ||
| 1917 | } | 2171 | } |
| 1918 | 2172 | ||
| 2173 | if (count > 1) | ||
| 2174 | sort(devs, count, sizeof(struct device *), cmp_dpa, NULL); | ||
| 2175 | |||
| 1919 | return devs; | 2176 | return devs; |
| 1920 | 2177 | ||
| 1921 | err: | 2178 | err: |
| 1922 | for (i = 0; i < count; i++) { | 2179 | for (i = 0; devs[i]; i++) |
| 1923 | nsblk = to_nd_namespace_blk(devs[i]); | 2180 | if (is_nd_blk(&nd_region->dev)) |
| 1924 | namespace_blk_release(&nsblk->common.dev); | 2181 | namespace_blk_release(devs[i]); |
| 1925 | } | 2182 | else |
| 2183 | namespace_pmem_release(devs[i]); | ||
| 1926 | kfree(devs); | 2184 | kfree(devs); |
| 1927 | return NULL; | 2185 | return NULL; |
| 1928 | } | 2186 | } |
| 1929 | 2187 | ||
| 2188 | static struct device **create_namespaces(struct nd_region *nd_region) | ||
| 2189 | { | ||
| 2190 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
| 2191 | struct device **devs; | ||
| 2192 | int i; | ||
| 2193 | |||
| 2194 | if (nd_region->ndr_mappings == 0) | ||
| 2195 | return NULL; | ||
| 2196 | |||
| 2197 | /* lock down all mappings while we scan labels */ | ||
| 2198 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 2199 | nd_mapping = &nd_region->mapping[i]; | ||
| 2200 | mutex_lock_nested(&nd_mapping->lock, i); | ||
| 2201 | } | ||
| 2202 | |||
| 2203 | devs = scan_labels(nd_region); | ||
| 2204 | |||
| 2205 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 2206 | int reverse = nd_region->ndr_mappings - 1 - i; | ||
| 2207 | |||
| 2208 | nd_mapping = &nd_region->mapping[reverse]; | ||
| 2209 | mutex_unlock(&nd_mapping->lock); | ||
| 2210 | } | ||
| 2211 | |||
| 2212 | return devs; | ||
| 2213 | } | ||
| 2214 | |||
| 1930 | static int init_active_labels(struct nd_region *nd_region) | 2215 | static int init_active_labels(struct nd_region *nd_region) |
| 1931 | { | 2216 | { |
| 1932 | int i; | 2217 | int i; |
| @@ -1935,6 +2220,7 @@ static int init_active_labels(struct nd_region *nd_region) | |||
| 1935 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 2220 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
| 1936 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 2221 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 1937 | struct nvdimm *nvdimm = nd_mapping->nvdimm; | 2222 | struct nvdimm *nvdimm = nd_mapping->nvdimm; |
| 2223 | struct nd_label_ent *label_ent; | ||
| 1938 | int count, j; | 2224 | int count, j; |
| 1939 | 2225 | ||
| 1940 | /* | 2226 | /* |
| @@ -1956,16 +2242,27 @@ static int init_active_labels(struct nd_region *nd_region) | |||
| 1956 | dev_dbg(ndd->dev, "%s: %d\n", __func__, count); | 2242 | dev_dbg(ndd->dev, "%s: %d\n", __func__, count); |
| 1957 | if (!count) | 2243 | if (!count) |
| 1958 | continue; | 2244 | continue; |
| 1959 | nd_mapping->labels = kcalloc(count + 1, sizeof(void *), | ||
| 1960 | GFP_KERNEL); | ||
| 1961 | if (!nd_mapping->labels) | ||
| 1962 | return -ENOMEM; | ||
| 1963 | for (j = 0; j < count; j++) { | 2245 | for (j = 0; j < count; j++) { |
| 1964 | struct nd_namespace_label *label; | 2246 | struct nd_namespace_label *label; |
| 1965 | 2247 | ||
| 2248 | label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL); | ||
| 2249 | if (!label_ent) | ||
| 2250 | break; | ||
| 1966 | label = nd_label_active(ndd, j); | 2251 | label = nd_label_active(ndd, j); |
| 1967 | nd_mapping->labels[j] = label; | 2252 | label_ent->label = label; |
| 2253 | |||
| 2254 | mutex_lock(&nd_mapping->lock); | ||
| 2255 | list_add_tail(&label_ent->list, &nd_mapping->labels); | ||
| 2256 | mutex_unlock(&nd_mapping->lock); | ||
| 1968 | } | 2257 | } |
| 2258 | |||
| 2259 | if (j >= count) | ||
| 2260 | continue; | ||
| 2261 | |||
| 2262 | mutex_lock(&nd_mapping->lock); | ||
| 2263 | nd_mapping_free_labels(nd_mapping); | ||
| 2264 | mutex_unlock(&nd_mapping->lock); | ||
| 2265 | return -ENOMEM; | ||
| 1969 | } | 2266 | } |
| 1970 | 2267 | ||
| 1971 | return 0; | 2268 | return 0; |
| @@ -1990,10 +2287,8 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) | |||
| 1990 | devs = create_namespace_io(nd_region); | 2287 | devs = create_namespace_io(nd_region); |
| 1991 | break; | 2288 | break; |
| 1992 | case ND_DEVICE_NAMESPACE_PMEM: | 2289 | case ND_DEVICE_NAMESPACE_PMEM: |
| 1993 | devs = create_namespace_pmem(nd_region); | ||
| 1994 | break; | ||
| 1995 | case ND_DEVICE_NAMESPACE_BLK: | 2290 | case ND_DEVICE_NAMESPACE_BLK: |
| 1996 | devs = create_namespace_blk(nd_region); | 2291 | devs = create_namespaces(nd_region); |
| 1997 | break; | 2292 | break; |
| 1998 | default: | 2293 | default: |
| 1999 | break; | 2294 | break; |
| @@ -2014,6 +2309,13 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) | |||
| 2014 | id = ida_simple_get(&nd_region->ns_ida, 0, 0, | 2309 | id = ida_simple_get(&nd_region->ns_ida, 0, 0, |
| 2015 | GFP_KERNEL); | 2310 | GFP_KERNEL); |
| 2016 | nsblk->id = id; | 2311 | nsblk->id = id; |
| 2312 | } else if (type == ND_DEVICE_NAMESPACE_PMEM) { | ||
| 2313 | struct nd_namespace_pmem *nspm; | ||
| 2314 | |||
| 2315 | nspm = to_nd_namespace_pmem(dev); | ||
| 2316 | id = ida_simple_get(&nd_region->ns_ida, 0, 0, | ||
| 2317 | GFP_KERNEL); | ||
| 2318 | nspm->id = id; | ||
| 2017 | } else | 2319 | } else |
| 2018 | id = i; | 2320 | id = i; |
| 2019 | 2321 | ||
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 38ce6bbbc170..8623e57c2ce3 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
| @@ -44,6 +44,23 @@ struct nvdimm { | |||
| 44 | struct resource *flush_wpq; | 44 | struct resource *flush_wpq; |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | /** | ||
| 48 | * struct blk_alloc_info - tracking info for BLK dpa scanning | ||
| 49 | * @nd_mapping: blk region mapping boundaries | ||
| 50 | * @available: decremented in alias_dpa_busy as aliased PMEM is scanned | ||
| 51 | * @busy: decremented in blk_dpa_busy to account for ranges already | ||
| 52 | * handled by alias_dpa_busy | ||
| 53 | * @res: alias_dpa_busy interprets this a free space range that needs to | ||
| 54 | * be truncated to the valid BLK allocation starting DPA, blk_dpa_busy | ||
| 55 | * treats it as a busy range that needs the aliased PMEM ranges | ||
| 56 | * truncated. | ||
| 57 | */ | ||
| 58 | struct blk_alloc_info { | ||
| 59 | struct nd_mapping *nd_mapping; | ||
| 60 | resource_size_t available, busy; | ||
| 61 | struct resource *res; | ||
| 62 | }; | ||
| 63 | |||
| 47 | bool is_nvdimm(struct device *dev); | 64 | bool is_nvdimm(struct device *dev); |
| 48 | bool is_nd_pmem(struct device *dev); | 65 | bool is_nd_pmem(struct device *dev); |
| 49 | bool is_nd_blk(struct device *dev); | 66 | bool is_nd_blk(struct device *dev); |
| @@ -54,7 +71,7 @@ void nvdimm_devs_exit(void); | |||
| 54 | void nd_region_devs_exit(void); | 71 | void nd_region_devs_exit(void); |
| 55 | void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev); | 72 | void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev); |
| 56 | struct nd_region; | 73 | struct nd_region; |
| 57 | void nd_region_create_blk_seed(struct nd_region *nd_region); | 74 | void nd_region_create_ns_seed(struct nd_region *nd_region); |
| 58 | void nd_region_create_btt_seed(struct nd_region *nd_region); | 75 | void nd_region_create_btt_seed(struct nd_region *nd_region); |
| 59 | void nd_region_create_pfn_seed(struct nd_region *nd_region); | 76 | void nd_region_create_pfn_seed(struct nd_region *nd_region); |
| 60 | void nd_region_create_dax_seed(struct nd_region *nd_region); | 77 | void nd_region_create_dax_seed(struct nd_region *nd_region); |
| @@ -73,13 +90,14 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid); | |||
| 73 | struct nd_region; | 90 | struct nd_region; |
| 74 | struct nvdimm_drvdata; | 91 | struct nvdimm_drvdata; |
| 75 | struct nd_mapping; | 92 | struct nd_mapping; |
| 93 | void nd_mapping_free_labels(struct nd_mapping *nd_mapping); | ||
| 76 | resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, | 94 | resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, |
| 77 | struct nd_mapping *nd_mapping, resource_size_t *overlap); | 95 | struct nd_mapping *nd_mapping, resource_size_t *overlap); |
| 78 | resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping); | 96 | resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); |
| 79 | resource_size_t nd_region_available_dpa(struct nd_region *nd_region); | 97 | resource_size_t nd_region_available_dpa(struct nd_region *nd_region); |
| 80 | resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, | 98 | resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, |
| 81 | struct nd_label_id *label_id); | 99 | struct nd_label_id *label_id); |
| 82 | struct nd_mapping; | 100 | int alias_dpa_busy(struct device *dev, void *data); |
| 83 | struct resource *nsblk_add_resource(struct nd_region *nd_region, | 101 | struct resource *nsblk_add_resource(struct nd_region *nd_region, |
| 84 | struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, | 102 | struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, |
| 85 | resource_size_t start); | 103 | resource_size_t start); |
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 0b78a8211f4a..d3b2fca8deec 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h | |||
| @@ -101,9 +101,6 @@ static inline struct nd_namespace_index *to_next_namespace_index( | |||
| 101 | (unsigned long long) (res ? resource_size(res) : 0), \ | 101 | (unsigned long long) (res ? resource_size(res) : 0), \ |
| 102 | (unsigned long long) (res ? res->start : 0), ##arg) | 102 | (unsigned long long) (res ? res->start : 0), ##arg) |
| 103 | 103 | ||
| 104 | #define for_each_label(l, label, labels) \ | ||
| 105 | for (l = 0; (label = labels ? labels[l] : NULL); l++) | ||
| 106 | |||
| 107 | #define for_each_dpa_resource(ndd, res) \ | 104 | #define for_each_dpa_resource(ndd, res) \ |
| 108 | for (res = (ndd)->dpa.child; res; res = res->sibling) | 105 | for (res = (ndd)->dpa.child; res; res = res->sibling) |
| 109 | 106 | ||
| @@ -116,6 +113,31 @@ struct nd_percpu_lane { | |||
| 116 | spinlock_t lock; | 113 | spinlock_t lock; |
| 117 | }; | 114 | }; |
| 118 | 115 | ||
| 116 | struct nd_label_ent { | ||
| 117 | struct list_head list; | ||
| 118 | struct nd_namespace_label *label; | ||
| 119 | }; | ||
| 120 | |||
| 121 | enum nd_mapping_lock_class { | ||
| 122 | ND_MAPPING_CLASS0, | ||
| 123 | ND_MAPPING_UUID_SCAN, | ||
| 124 | }; | ||
| 125 | |||
| 126 | struct nd_mapping { | ||
| 127 | struct nvdimm *nvdimm; | ||
| 128 | u64 start; | ||
| 129 | u64 size; | ||
| 130 | struct list_head labels; | ||
| 131 | struct mutex lock; | ||
| 132 | /* | ||
| 133 | * @ndd is for private use at region enable / disable time for | ||
| 134 | * get_ndd() + put_ndd(), all other nd_mapping to ndd | ||
| 135 | * conversions use to_ndd() which respects enabled state of the | ||
| 136 | * nvdimm. | ||
| 137 | */ | ||
| 138 | struct nvdimm_drvdata *ndd; | ||
| 139 | }; | ||
| 140 | |||
| 119 | struct nd_region { | 141 | struct nd_region { |
| 120 | struct device dev; | 142 | struct device dev; |
| 121 | struct ida ns_ida; | 143 | struct ida ns_ida; |
| @@ -209,6 +231,7 @@ void nvdimm_exit(void); | |||
| 209 | void nd_region_exit(void); | 231 | void nd_region_exit(void); |
| 210 | struct nvdimm; | 232 | struct nvdimm; |
| 211 | struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping); | 233 | struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping); |
| 234 | int nvdimm_check_config_data(struct device *dev); | ||
| 212 | int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd); | 235 | int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd); |
| 213 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd); | 236 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd); |
| 214 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | 237 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 571a6c7ee2fc..42b3a8217073 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
| @@ -66,13 +66,32 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, | |||
| 66 | invalidate_pmem(pmem->virt_addr + offset, len); | 66 | invalidate_pmem(pmem->virt_addr + offset, len); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static void write_pmem(void *pmem_addr, struct page *page, | ||
| 70 | unsigned int off, unsigned int len) | ||
| 71 | { | ||
| 72 | void *mem = kmap_atomic(page); | ||
| 73 | |||
| 74 | memcpy_to_pmem(pmem_addr, mem + off, len); | ||
| 75 | kunmap_atomic(mem); | ||
| 76 | } | ||
| 77 | |||
| 78 | static int read_pmem(struct page *page, unsigned int off, | ||
| 79 | void *pmem_addr, unsigned int len) | ||
| 80 | { | ||
| 81 | int rc; | ||
| 82 | void *mem = kmap_atomic(page); | ||
| 83 | |||
| 84 | rc = memcpy_from_pmem(mem + off, pmem_addr, len); | ||
| 85 | kunmap_atomic(mem); | ||
| 86 | return rc; | ||
| 87 | } | ||
| 88 | |||
| 69 | static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | 89 | static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, |
| 70 | unsigned int len, unsigned int off, bool is_write, | 90 | unsigned int len, unsigned int off, bool is_write, |
| 71 | sector_t sector) | 91 | sector_t sector) |
| 72 | { | 92 | { |
| 73 | int rc = 0; | 93 | int rc = 0; |
| 74 | bool bad_pmem = false; | 94 | bool bad_pmem = false; |
| 75 | void *mem = kmap_atomic(page); | ||
| 76 | phys_addr_t pmem_off = sector * 512 + pmem->data_offset; | 95 | phys_addr_t pmem_off = sector * 512 + pmem->data_offset; |
| 77 | void *pmem_addr = pmem->virt_addr + pmem_off; | 96 | void *pmem_addr = pmem->virt_addr + pmem_off; |
| 78 | 97 | ||
| @@ -83,7 +102,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | |||
| 83 | if (unlikely(bad_pmem)) | 102 | if (unlikely(bad_pmem)) |
| 84 | rc = -EIO; | 103 | rc = -EIO; |
| 85 | else { | 104 | else { |
| 86 | rc = memcpy_from_pmem(mem + off, pmem_addr, len); | 105 | rc = read_pmem(page, off, pmem_addr, len); |
| 87 | flush_dcache_page(page); | 106 | flush_dcache_page(page); |
| 88 | } | 107 | } |
| 89 | } else { | 108 | } else { |
| @@ -102,14 +121,13 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, | |||
| 102 | * after clear poison. | 121 | * after clear poison. |
| 103 | */ | 122 | */ |
| 104 | flush_dcache_page(page); | 123 | flush_dcache_page(page); |
| 105 | memcpy_to_pmem(pmem_addr, mem + off, len); | 124 | write_pmem(pmem_addr, page, off, len); |
| 106 | if (unlikely(bad_pmem)) { | 125 | if (unlikely(bad_pmem)) { |
| 107 | pmem_clear_poison(pmem, pmem_off, len); | 126 | pmem_clear_poison(pmem, pmem_off, len); |
| 108 | memcpy_to_pmem(pmem_addr, mem + off, len); | 127 | write_pmem(pmem_addr, page, off, len); |
| 109 | } | 128 | } |
| 110 | } | 129 | } |
| 111 | 130 | ||
| 112 | kunmap_atomic(mem); | ||
| 113 | return rc; | 131 | return rc; |
| 114 | } | 132 | } |
| 115 | 133 | ||
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index f9d58c2b5341..6af5e629140c 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
| @@ -313,9 +313,8 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) | |||
| 313 | blk_max_overlap = overlap; | 313 | blk_max_overlap = overlap; |
| 314 | goto retry; | 314 | goto retry; |
| 315 | } | 315 | } |
| 316 | } else if (is_nd_blk(&nd_region->dev)) { | 316 | } else if (is_nd_blk(&nd_region->dev)) |
| 317 | available += nd_blk_available_dpa(nd_mapping); | 317 | available += nd_blk_available_dpa(nd_region); |
| 318 | } | ||
| 319 | } | 318 | } |
| 320 | 319 | ||
| 321 | return available; | 320 | return available; |
| @@ -506,6 +505,17 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region) | |||
| 506 | return 0; | 505 | return 0; |
| 507 | } | 506 | } |
| 508 | 507 | ||
| 508 | void nd_mapping_free_labels(struct nd_mapping *nd_mapping) | ||
| 509 | { | ||
| 510 | struct nd_label_ent *label_ent, *e; | ||
| 511 | |||
| 512 | WARN_ON(!mutex_is_locked(&nd_mapping->lock)); | ||
| 513 | list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { | ||
| 514 | list_del(&label_ent->list); | ||
| 515 | kfree(label_ent); | ||
| 516 | } | ||
| 517 | } | ||
| 518 | |||
| 509 | /* | 519 | /* |
| 510 | * Upon successful probe/remove, take/release a reference on the | 520 | * Upon successful probe/remove, take/release a reference on the |
| 511 | * associated interleave set (if present), and plant new btt + namespace | 521 | * associated interleave set (if present), and plant new btt + namespace |
| @@ -526,8 +536,10 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, | |||
| 526 | struct nvdimm_drvdata *ndd = nd_mapping->ndd; | 536 | struct nvdimm_drvdata *ndd = nd_mapping->ndd; |
| 527 | struct nvdimm *nvdimm = nd_mapping->nvdimm; | 537 | struct nvdimm *nvdimm = nd_mapping->nvdimm; |
| 528 | 538 | ||
| 529 | kfree(nd_mapping->labels); | 539 | mutex_lock(&nd_mapping->lock); |
| 530 | nd_mapping->labels = NULL; | 540 | nd_mapping_free_labels(nd_mapping); |
| 541 | mutex_unlock(&nd_mapping->lock); | ||
| 542 | |||
| 531 | put_ndd(ndd); | 543 | put_ndd(ndd); |
| 532 | nd_mapping->ndd = NULL; | 544 | nd_mapping->ndd = NULL; |
| 533 | if (ndd) | 545 | if (ndd) |
| @@ -537,11 +549,12 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, | |||
| 537 | if (is_nd_pmem(dev)) | 549 | if (is_nd_pmem(dev)) |
| 538 | return; | 550 | return; |
| 539 | } | 551 | } |
| 540 | if (dev->parent && is_nd_blk(dev->parent) && probe) { | 552 | if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent)) |
| 553 | && probe) { | ||
| 541 | nd_region = to_nd_region(dev->parent); | 554 | nd_region = to_nd_region(dev->parent); |
| 542 | nvdimm_bus_lock(dev); | 555 | nvdimm_bus_lock(dev); |
| 543 | if (nd_region->ns_seed == dev) | 556 | if (nd_region->ns_seed == dev) |
| 544 | nd_region_create_blk_seed(nd_region); | 557 | nd_region_create_ns_seed(nd_region); |
| 545 | nvdimm_bus_unlock(dev); | 558 | nvdimm_bus_unlock(dev); |
| 546 | } | 559 | } |
| 547 | if (is_nd_btt(dev) && probe) { | 560 | if (is_nd_btt(dev) && probe) { |
| @@ -551,23 +564,30 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, | |||
| 551 | nvdimm_bus_lock(dev); | 564 | nvdimm_bus_lock(dev); |
| 552 | if (nd_region->btt_seed == dev) | 565 | if (nd_region->btt_seed == dev) |
| 553 | nd_region_create_btt_seed(nd_region); | 566 | nd_region_create_btt_seed(nd_region); |
| 554 | if (nd_region->ns_seed == &nd_btt->ndns->dev && | 567 | if (nd_region->ns_seed == &nd_btt->ndns->dev) |
| 555 | is_nd_blk(dev->parent)) | 568 | nd_region_create_ns_seed(nd_region); |
| 556 | nd_region_create_blk_seed(nd_region); | ||
| 557 | nvdimm_bus_unlock(dev); | 569 | nvdimm_bus_unlock(dev); |
| 558 | } | 570 | } |
| 559 | if (is_nd_pfn(dev) && probe) { | 571 | if (is_nd_pfn(dev) && probe) { |
| 572 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
| 573 | |||
| 560 | nd_region = to_nd_region(dev->parent); | 574 | nd_region = to_nd_region(dev->parent); |
| 561 | nvdimm_bus_lock(dev); | 575 | nvdimm_bus_lock(dev); |
| 562 | if (nd_region->pfn_seed == dev) | 576 | if (nd_region->pfn_seed == dev) |
| 563 | nd_region_create_pfn_seed(nd_region); | 577 | nd_region_create_pfn_seed(nd_region); |
| 578 | if (nd_region->ns_seed == &nd_pfn->ndns->dev) | ||
| 579 | nd_region_create_ns_seed(nd_region); | ||
| 564 | nvdimm_bus_unlock(dev); | 580 | nvdimm_bus_unlock(dev); |
| 565 | } | 581 | } |
| 566 | if (is_nd_dax(dev) && probe) { | 582 | if (is_nd_dax(dev) && probe) { |
| 583 | struct nd_dax *nd_dax = to_nd_dax(dev); | ||
| 584 | |||
| 567 | nd_region = to_nd_region(dev->parent); | 585 | nd_region = to_nd_region(dev->parent); |
| 568 | nvdimm_bus_lock(dev); | 586 | nvdimm_bus_lock(dev); |
| 569 | if (nd_region->dax_seed == dev) | 587 | if (nd_region->dax_seed == dev) |
| 570 | nd_region_create_dax_seed(nd_region); | 588 | nd_region_create_dax_seed(nd_region); |
| 589 | if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) | ||
| 590 | nd_region_create_ns_seed(nd_region); | ||
| 571 | nvdimm_bus_unlock(dev); | 591 | nvdimm_bus_unlock(dev); |
| 572 | } | 592 | } |
| 573 | } | 593 | } |
| @@ -774,10 +794,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, | |||
| 774 | int ro = 0; | 794 | int ro = 0; |
| 775 | 795 | ||
| 776 | for (i = 0; i < ndr_desc->num_mappings; i++) { | 796 | for (i = 0; i < ndr_desc->num_mappings; i++) { |
| 777 | struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; | 797 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; |
| 778 | struct nvdimm *nvdimm = nd_mapping->nvdimm; | 798 | struct nvdimm *nvdimm = mapping->nvdimm; |
| 779 | 799 | ||
| 780 | if ((nd_mapping->start | nd_mapping->size) % SZ_4K) { | 800 | if ((mapping->start | mapping->size) % SZ_4K) { |
| 781 | dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n", | 801 | dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n", |
| 782 | caller, dev_name(&nvdimm->dev), i); | 802 | caller, dev_name(&nvdimm->dev), i); |
| 783 | 803 | ||
| @@ -828,11 +848,15 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, | |||
| 828 | ndl->count = 0; | 848 | ndl->count = 0; |
| 829 | } | 849 | } |
| 830 | 850 | ||
| 831 | memcpy(nd_region->mapping, ndr_desc->nd_mapping, | ||
| 832 | sizeof(struct nd_mapping) * ndr_desc->num_mappings); | ||
| 833 | for (i = 0; i < ndr_desc->num_mappings; i++) { | 851 | for (i = 0; i < ndr_desc->num_mappings; i++) { |
| 834 | struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; | 852 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; |
| 835 | struct nvdimm *nvdimm = nd_mapping->nvdimm; | 853 | struct nvdimm *nvdimm = mapping->nvdimm; |
| 854 | |||
| 855 | nd_region->mapping[i].nvdimm = nvdimm; | ||
| 856 | nd_region->mapping[i].start = mapping->start; | ||
| 857 | nd_region->mapping[i].size = mapping->size; | ||
| 858 | INIT_LIST_HEAD(&nd_region->mapping[i].labels); | ||
| 859 | mutex_init(&nd_region->mapping[i].lock); | ||
| 836 | 860 | ||
| 837 | get_device(&nvdimm->dev); | 861 | get_device(&nvdimm->dev); |
| 838 | } | 862 | } |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index b519e137b9b7..f4947fda11e7 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
| @@ -50,23 +50,6 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, | |||
| 50 | struct nvdimm *nvdimm, unsigned int cmd, void *buf, | 50 | struct nvdimm *nvdimm, unsigned int cmd, void *buf, |
| 51 | unsigned int buf_len, int *cmd_rc); | 51 | unsigned int buf_len, int *cmd_rc); |
| 52 | 52 | ||
| 53 | struct nd_namespace_label; | ||
| 54 | struct nvdimm_drvdata; | ||
| 55 | |||
| 56 | struct nd_mapping { | ||
| 57 | struct nvdimm *nvdimm; | ||
| 58 | struct nd_namespace_label **labels; | ||
| 59 | u64 start; | ||
| 60 | u64 size; | ||
| 61 | /* | ||
| 62 | * @ndd is for private use at region enable / disable time for | ||
| 63 | * get_ndd() + put_ndd(), all other nd_mapping to ndd | ||
| 64 | * conversions use to_ndd() which respects enabled state of the | ||
| 65 | * nvdimm. | ||
| 66 | */ | ||
| 67 | struct nvdimm_drvdata *ndd; | ||
| 68 | }; | ||
| 69 | |||
| 70 | struct nvdimm_bus_descriptor { | 53 | struct nvdimm_bus_descriptor { |
| 71 | const struct attribute_group **attr_groups; | 54 | const struct attribute_group **attr_groups; |
| 72 | unsigned long cmd_mask; | 55 | unsigned long cmd_mask; |
| @@ -89,9 +72,15 @@ struct nd_interleave_set { | |||
| 89 | u64 cookie; | 72 | u64 cookie; |
| 90 | }; | 73 | }; |
| 91 | 74 | ||
| 75 | struct nd_mapping_desc { | ||
| 76 | struct nvdimm *nvdimm; | ||
| 77 | u64 start; | ||
| 78 | u64 size; | ||
| 79 | }; | ||
| 80 | |||
| 92 | struct nd_region_desc { | 81 | struct nd_region_desc { |
| 93 | struct resource *res; | 82 | struct resource *res; |
| 94 | struct nd_mapping *nd_mapping; | 83 | struct nd_mapping_desc *mapping; |
| 95 | u16 num_mappings; | 84 | u16 num_mappings; |
| 96 | const struct attribute_group **attr_groups; | 85 | const struct attribute_group **attr_groups; |
| 97 | struct nd_interleave_set *nd_set; | 86 | struct nd_interleave_set *nd_set; |
| @@ -129,6 +118,8 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( | |||
| 129 | } | 118 | } |
| 130 | 119 | ||
| 131 | int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); | 120 | int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); |
| 121 | void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus, | ||
| 122 | phys_addr_t start, unsigned int len); | ||
| 132 | struct nvdimm_bus *nvdimm_bus_register(struct device *parent, | 123 | struct nvdimm_bus *nvdimm_bus_register(struct device *parent, |
| 133 | struct nvdimm_bus_descriptor *nfit_desc); | 124 | struct nvdimm_bus_descriptor *nfit_desc); |
| 134 | void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); | 125 | void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); |
| @@ -139,6 +130,7 @@ struct nd_blk_region *to_nd_blk_region(struct device *dev); | |||
| 139 | struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); | 130 | struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); |
| 140 | struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); | 131 | struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); |
| 141 | const char *nvdimm_name(struct nvdimm *nvdimm); | 132 | const char *nvdimm_name(struct nvdimm *nvdimm); |
| 133 | struct kobject *nvdimm_kobj(struct nvdimm *nvdimm); | ||
| 142 | unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); | 134 | unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); |
| 143 | void *nvdimm_provider_data(struct nvdimm *nvdimm); | 135 | void *nvdimm_provider_data(struct nvdimm *nvdimm); |
| 144 | struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, | 136 | struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, |
diff --git a/include/linux/nd.h b/include/linux/nd.h index f1ea426d6a5e..fa66aeed441a 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h | |||
| @@ -77,11 +77,13 @@ struct nd_namespace_io { | |||
| 77 | * @nsio: device and system physical address range to drive | 77 | * @nsio: device and system physical address range to drive |
| 78 | * @alt_name: namespace name supplied in the dimm label | 78 | * @alt_name: namespace name supplied in the dimm label |
| 79 | * @uuid: namespace name supplied in the dimm label | 79 | * @uuid: namespace name supplied in the dimm label |
| 80 | * @id: ida allocated id | ||
| 80 | */ | 81 | */ |
| 81 | struct nd_namespace_pmem { | 82 | struct nd_namespace_pmem { |
| 82 | struct nd_namespace_io nsio; | 83 | struct nd_namespace_io nsio; |
| 83 | char *alt_name; | 84 | char *alt_name; |
| 84 | u8 *uuid; | 85 | u8 *uuid; |
| 86 | int id; | ||
| 85 | }; | 87 | }; |
| 86 | 88 | ||
| 87 | /** | 89 | /** |
| @@ -105,19 +107,19 @@ struct nd_namespace_blk { | |||
| 105 | struct resource **res; | 107 | struct resource **res; |
| 106 | }; | 108 | }; |
| 107 | 109 | ||
| 108 | static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev) | 110 | static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev) |
| 109 | { | 111 | { |
| 110 | return container_of(dev, struct nd_namespace_io, common.dev); | 112 | return container_of(dev, struct nd_namespace_io, common.dev); |
| 111 | } | 113 | } |
| 112 | 114 | ||
| 113 | static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev) | 115 | static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev) |
| 114 | { | 116 | { |
| 115 | struct nd_namespace_io *nsio = to_nd_namespace_io(dev); | 117 | struct nd_namespace_io *nsio = to_nd_namespace_io(dev); |
| 116 | 118 | ||
| 117 | return container_of(nsio, struct nd_namespace_pmem, nsio); | 119 | return container_of(nsio, struct nd_namespace_pmem, nsio); |
| 118 | } | 120 | } |
| 119 | 121 | ||
| 120 | static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev) | 122 | static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev) |
| 121 | { | 123 | { |
| 122 | return container_of(dev, struct nd_namespace_blk, common.dev); | 124 | return container_of(dev, struct nd_namespace_blk, common.dev); |
| 123 | } | 125 | } |
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index ba5a8c79652a..ede5c6a62164 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h | |||
| @@ -21,14 +21,16 @@ struct nd_cmd_smart { | |||
| 21 | } __packed; | 21 | } __packed; |
| 22 | 22 | ||
| 23 | #define ND_SMART_HEALTH_VALID (1 << 0) | 23 | #define ND_SMART_HEALTH_VALID (1 << 0) |
| 24 | #define ND_SMART_TEMP_VALID (1 << 1) | 24 | #define ND_SMART_SPARES_VALID (1 << 1) |
| 25 | #define ND_SMART_SPARES_VALID (1 << 2) | 25 | #define ND_SMART_USED_VALID (1 << 2) |
| 26 | #define ND_SMART_ALARM_VALID (1 << 3) | 26 | #define ND_SMART_TEMP_VALID (1 << 3) |
| 27 | #define ND_SMART_USED_VALID (1 << 4) | 27 | #define ND_SMART_CTEMP_VALID (1 << 4) |
| 28 | #define ND_SMART_SHUTDOWN_VALID (1 << 5) | 28 | #define ND_SMART_ALARM_VALID (1 << 9) |
| 29 | #define ND_SMART_VENDOR_VALID (1 << 6) | 29 | #define ND_SMART_SHUTDOWN_VALID (1 << 10) |
| 30 | #define ND_SMART_TEMP_TRIP (1 << 0) | 30 | #define ND_SMART_VENDOR_VALID (1 << 11) |
| 31 | #define ND_SMART_SPARE_TRIP (1 << 1) | 31 | #define ND_SMART_SPARE_TRIP (1 << 0) |
| 32 | #define ND_SMART_TEMP_TRIP (1 << 1) | ||
| 33 | #define ND_SMART_CTEMP_TRIP (1 << 2) | ||
| 32 | #define ND_SMART_NON_CRITICAL_HEALTH (1 << 0) | 34 | #define ND_SMART_NON_CRITICAL_HEALTH (1 << 0) |
| 33 | #define ND_SMART_CRITICAL_HEALTH (1 << 1) | 35 | #define ND_SMART_CRITICAL_HEALTH (1 << 1) |
| 34 | #define ND_SMART_FATAL_HEALTH (1 << 2) | 36 | #define ND_SMART_FATAL_HEALTH (1 << 2) |
| @@ -37,14 +39,15 @@ struct nd_smart_payload { | |||
| 37 | __u32 flags; | 39 | __u32 flags; |
| 38 | __u8 reserved0[4]; | 40 | __u8 reserved0[4]; |
| 39 | __u8 health; | 41 | __u8 health; |
| 40 | __u16 temperature; | ||
| 41 | __u8 spares; | 42 | __u8 spares; |
| 42 | __u8 alarm_flags; | ||
| 43 | __u8 life_used; | 43 | __u8 life_used; |
| 44 | __u8 alarm_flags; | ||
| 45 | __u16 temperature; | ||
| 46 | __u16 ctrl_temperature; | ||
| 47 | __u8 reserved1[15]; | ||
| 44 | __u8 shutdown_state; | 48 | __u8 shutdown_state; |
| 45 | __u8 reserved1; | ||
| 46 | __u32 vendor_size; | 49 | __u32 vendor_size; |
| 47 | __u8 vendor_data[108]; | 50 | __u8 vendor_data[92]; |
| 48 | } __packed; | 51 | } __packed; |
| 49 | 52 | ||
| 50 | struct nd_cmd_smart_threshold { | 53 | struct nd_cmd_smart_threshold { |
| @@ -53,7 +56,8 @@ struct nd_cmd_smart_threshold { | |||
| 53 | } __packed; | 56 | } __packed; |
| 54 | 57 | ||
| 55 | struct nd_smart_threshold_payload { | 58 | struct nd_smart_threshold_payload { |
| 56 | __u16 alarm_control; | 59 | __u8 alarm_control; |
| 60 | __u8 reserved0; | ||
| 57 | __u16 temperature; | 61 | __u16 temperature; |
| 58 | __u8 spares; | 62 | __u8 spares; |
| 59 | __u8 reserved[3]; | 63 | __u8 reserved[3]; |
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild index ad6dd0543019..582db95127ed 100644 --- a/tools/testing/nvdimm/Kbuild +++ b/tools/testing/nvdimm/Kbuild | |||
| @@ -13,6 +13,7 @@ ldflags-y += --wrap=__release_region | |||
| 13 | ldflags-y += --wrap=devm_memremap_pages | 13 | ldflags-y += --wrap=devm_memremap_pages |
| 14 | ldflags-y += --wrap=insert_resource | 14 | ldflags-y += --wrap=insert_resource |
| 15 | ldflags-y += --wrap=remove_resource | 15 | ldflags-y += --wrap=remove_resource |
| 16 | ldflags-y += --wrap=acpi_evaluate_object | ||
| 16 | 17 | ||
| 17 | DRIVERS := ../../../drivers | 18 | DRIVERS := ../../../drivers |
| 18 | NVDIMM_SRC := $(DRIVERS)/nvdimm | 19 | NVDIMM_SRC := $(DRIVERS)/nvdimm |
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index c29f8dca9e67..3ccef732fce9 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 19 | #include <linux/pfn_t.h> | 19 | #include <linux/pfn_t.h> |
| 20 | #include <linux/acpi.h> | ||
| 20 | #include <linux/io.h> | 21 | #include <linux/io.h> |
| 21 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
| 22 | #include "nfit_test.h" | 23 | #include "nfit_test.h" |
| @@ -73,7 +74,7 @@ void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size, | |||
| 73 | 74 | ||
| 74 | if (nfit_res) | 75 | if (nfit_res) |
| 75 | return (void __iomem *) nfit_res->buf + offset | 76 | return (void __iomem *) nfit_res->buf + offset |
| 76 | - nfit_res->res->start; | 77 | - nfit_res->res.start; |
| 77 | return fallback_fn(offset, size); | 78 | return fallback_fn(offset, size); |
| 78 | } | 79 | } |
| 79 | 80 | ||
| @@ -84,7 +85,7 @@ void __iomem *__wrap_devm_ioremap_nocache(struct device *dev, | |||
| 84 | 85 | ||
| 85 | if (nfit_res) | 86 | if (nfit_res) |
| 86 | return (void __iomem *) nfit_res->buf + offset | 87 | return (void __iomem *) nfit_res->buf + offset |
| 87 | - nfit_res->res->start; | 88 | - nfit_res->res.start; |
| 88 | return devm_ioremap_nocache(dev, offset, size); | 89 | return devm_ioremap_nocache(dev, offset, size); |
| 89 | } | 90 | } |
| 90 | EXPORT_SYMBOL(__wrap_devm_ioremap_nocache); | 91 | EXPORT_SYMBOL(__wrap_devm_ioremap_nocache); |
| @@ -95,7 +96,7 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset, | |||
| 95 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); | 96 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); |
| 96 | 97 | ||
| 97 | if (nfit_res) | 98 | if (nfit_res) |
| 98 | return nfit_res->buf + offset - nfit_res->res->start; | 99 | return nfit_res->buf + offset - nfit_res->res.start; |
| 99 | return devm_memremap(dev, offset, size, flags); | 100 | return devm_memremap(dev, offset, size, flags); |
| 100 | } | 101 | } |
| 101 | EXPORT_SYMBOL(__wrap_devm_memremap); | 102 | EXPORT_SYMBOL(__wrap_devm_memremap); |
| @@ -107,7 +108,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res, | |||
| 107 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); | 108 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); |
| 108 | 109 | ||
| 109 | if (nfit_res) | 110 | if (nfit_res) |
| 110 | return nfit_res->buf + offset - nfit_res->res->start; | 111 | return nfit_res->buf + offset - nfit_res->res.start; |
| 111 | return devm_memremap_pages(dev, res, ref, altmap); | 112 | return devm_memremap_pages(dev, res, ref, altmap); |
| 112 | } | 113 | } |
| 113 | EXPORT_SYMBOL(__wrap_devm_memremap_pages); | 114 | EXPORT_SYMBOL(__wrap_devm_memremap_pages); |
| @@ -128,7 +129,7 @@ void *__wrap_memremap(resource_size_t offset, size_t size, | |||
| 128 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); | 129 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); |
| 129 | 130 | ||
| 130 | if (nfit_res) | 131 | if (nfit_res) |
| 131 | return nfit_res->buf + offset - nfit_res->res->start; | 132 | return nfit_res->buf + offset - nfit_res->res.start; |
| 132 | return memremap(offset, size, flags); | 133 | return memremap(offset, size, flags); |
| 133 | } | 134 | } |
| 134 | EXPORT_SYMBOL(__wrap_memremap); | 135 | EXPORT_SYMBOL(__wrap_memremap); |
| @@ -174,6 +175,63 @@ void __wrap_memunmap(void *addr) | |||
| 174 | } | 175 | } |
| 175 | EXPORT_SYMBOL(__wrap_memunmap); | 176 | EXPORT_SYMBOL(__wrap_memunmap); |
| 176 | 177 | ||
| 178 | static bool nfit_test_release_region(struct device *dev, | ||
| 179 | struct resource *parent, resource_size_t start, | ||
| 180 | resource_size_t n); | ||
| 181 | |||
| 182 | static void nfit_devres_release(struct device *dev, void *data) | ||
| 183 | { | ||
| 184 | struct resource *res = *((struct resource **) data); | ||
| 185 | |||
| 186 | WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start, | ||
| 187 | resource_size(res))); | ||
| 188 | } | ||
| 189 | |||
| 190 | static int match(struct device *dev, void *__res, void *match_data) | ||
| 191 | { | ||
| 192 | struct resource *res = *((struct resource **) __res); | ||
| 193 | resource_size_t start = *((resource_size_t *) match_data); | ||
| 194 | |||
| 195 | return res->start == start; | ||
| 196 | } | ||
| 197 | |||
| 198 | static bool nfit_test_release_region(struct device *dev, | ||
| 199 | struct resource *parent, resource_size_t start, | ||
| 200 | resource_size_t n) | ||
| 201 | { | ||
| 202 | if (parent == &iomem_resource) { | ||
| 203 | struct nfit_test_resource *nfit_res = get_nfit_res(start); | ||
| 204 | |||
| 205 | if (nfit_res) { | ||
| 206 | struct nfit_test_request *req; | ||
| 207 | struct resource *res = NULL; | ||
| 208 | |||
| 209 | if (dev) { | ||
| 210 | devres_release(dev, nfit_devres_release, match, | ||
| 211 | &start); | ||
| 212 | return true; | ||
| 213 | } | ||
| 214 | |||
| 215 | spin_lock(&nfit_res->lock); | ||
| 216 | list_for_each_entry(req, &nfit_res->requests, list) | ||
| 217 | if (req->res.start == start) { | ||
| 218 | res = &req->res; | ||
| 219 | list_del(&req->list); | ||
| 220 | break; | ||
| 221 | } | ||
| 222 | spin_unlock(&nfit_res->lock); | ||
| 223 | |||
| 224 | WARN(!res || resource_size(res) != n, | ||
| 225 | "%s: start: %llx n: %llx mismatch: %pr\n", | ||
| 226 | __func__, start, n, res); | ||
| 227 | if (res) | ||
| 228 | kfree(req); | ||
| 229 | return true; | ||
| 230 | } | ||
| 231 | } | ||
| 232 | return false; | ||
| 233 | } | ||
| 234 | |||
| 177 | static struct resource *nfit_test_request_region(struct device *dev, | 235 | static struct resource *nfit_test_request_region(struct device *dev, |
| 178 | struct resource *parent, resource_size_t start, | 236 | struct resource *parent, resource_size_t start, |
| 179 | resource_size_t n, const char *name, int flags) | 237 | resource_size_t n, const char *name, int flags) |
| @@ -183,21 +241,57 @@ static struct resource *nfit_test_request_region(struct device *dev, | |||
| 183 | if (parent == &iomem_resource) { | 241 | if (parent == &iomem_resource) { |
| 184 | nfit_res = get_nfit_res(start); | 242 | nfit_res = get_nfit_res(start); |
| 185 | if (nfit_res) { | 243 | if (nfit_res) { |
| 186 | struct resource *res = nfit_res->res + 1; | 244 | struct nfit_test_request *req; |
| 245 | struct resource *res = NULL; | ||
| 187 | 246 | ||
| 188 | if (start + n > nfit_res->res->start | 247 | if (start + n > nfit_res->res.start |
| 189 | + resource_size(nfit_res->res)) { | 248 | + resource_size(&nfit_res->res)) { |
| 190 | pr_debug("%s: start: %llx n: %llx overflow: %pr\n", | 249 | pr_debug("%s: start: %llx n: %llx overflow: %pr\n", |
| 191 | __func__, start, n, | 250 | __func__, start, n, |
| 192 | nfit_res->res); | 251 | &nfit_res->res); |
| 193 | return NULL; | 252 | return NULL; |
| 194 | } | 253 | } |
| 195 | 254 | ||
| 255 | spin_lock(&nfit_res->lock); | ||
| 256 | list_for_each_entry(req, &nfit_res->requests, list) | ||
| 257 | if (start == req->res.start) { | ||
| 258 | res = &req->res; | ||
| 259 | break; | ||
| 260 | } | ||
| 261 | spin_unlock(&nfit_res->lock); | ||
| 262 | |||
| 263 | if (res) { | ||
| 264 | WARN(1, "%pr already busy\n", res); | ||
| 265 | return NULL; | ||
| 266 | } | ||
| 267 | |||
| 268 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
| 269 | if (!req) | ||
| 270 | return NULL; | ||
| 271 | INIT_LIST_HEAD(&req->list); | ||
| 272 | res = &req->res; | ||
| 273 | |||
| 196 | res->start = start; | 274 | res->start = start; |
| 197 | res->end = start + n - 1; | 275 | res->end = start + n - 1; |
| 198 | res->name = name; | 276 | res->name = name; |
| 199 | res->flags = resource_type(parent); | 277 | res->flags = resource_type(parent); |
| 200 | res->flags |= IORESOURCE_BUSY | flags; | 278 | res->flags |= IORESOURCE_BUSY | flags; |
| 279 | spin_lock(&nfit_res->lock); | ||
| 280 | list_add(&req->list, &nfit_res->requests); | ||
| 281 | spin_unlock(&nfit_res->lock); | ||
| 282 | |||
| 283 | if (dev) { | ||
| 284 | struct resource **d; | ||
| 285 | |||
| 286 | d = devres_alloc(nfit_devres_release, | ||
| 287 | sizeof(struct resource *), | ||
| 288 | GFP_KERNEL); | ||
| 289 | if (!d) | ||
| 290 | return NULL; | ||
| 291 | *d = res; | ||
| 292 | devres_add(dev, d); | ||
| 293 | } | ||
| 294 | |||
| 201 | pr_debug("%s: %pr\n", __func__, res); | 295 | pr_debug("%s: %pr\n", __func__, res); |
| 202 | return res; | 296 | return res; |
| 203 | } | 297 | } |
| @@ -241,29 +335,10 @@ struct resource *__wrap___devm_request_region(struct device *dev, | |||
| 241 | } | 335 | } |
| 242 | EXPORT_SYMBOL(__wrap___devm_request_region); | 336 | EXPORT_SYMBOL(__wrap___devm_request_region); |
| 243 | 337 | ||
| 244 | static bool nfit_test_release_region(struct resource *parent, | ||
| 245 | resource_size_t start, resource_size_t n) | ||
| 246 | { | ||
| 247 | if (parent == &iomem_resource) { | ||
| 248 | struct nfit_test_resource *nfit_res = get_nfit_res(start); | ||
| 249 | if (nfit_res) { | ||
| 250 | struct resource *res = nfit_res->res + 1; | ||
| 251 | |||
| 252 | if (start != res->start || resource_size(res) != n) | ||
| 253 | pr_info("%s: start: %llx n: %llx mismatch: %pr\n", | ||
| 254 | __func__, start, n, res); | ||
| 255 | else | ||
| 256 | memset(res, 0, sizeof(*res)); | ||
| 257 | return true; | ||
| 258 | } | ||
| 259 | } | ||
| 260 | return false; | ||
| 261 | } | ||
| 262 | |||
| 263 | void __wrap___release_region(struct resource *parent, resource_size_t start, | 338 | void __wrap___release_region(struct resource *parent, resource_size_t start, |
| 264 | resource_size_t n) | 339 | resource_size_t n) |
| 265 | { | 340 | { |
| 266 | if (!nfit_test_release_region(parent, start, n)) | 341 | if (!nfit_test_release_region(NULL, parent, start, n)) |
| 267 | __release_region(parent, start, n); | 342 | __release_region(parent, start, n); |
| 268 | } | 343 | } |
| 269 | EXPORT_SYMBOL(__wrap___release_region); | 344 | EXPORT_SYMBOL(__wrap___release_region); |
| @@ -271,9 +346,25 @@ EXPORT_SYMBOL(__wrap___release_region); | |||
| 271 | void __wrap___devm_release_region(struct device *dev, struct resource *parent, | 346 | void __wrap___devm_release_region(struct device *dev, struct resource *parent, |
| 272 | resource_size_t start, resource_size_t n) | 347 | resource_size_t start, resource_size_t n) |
| 273 | { | 348 | { |
| 274 | if (!nfit_test_release_region(parent, start, n)) | 349 | if (!nfit_test_release_region(dev, parent, start, n)) |
| 275 | __devm_release_region(dev, parent, start, n); | 350 | __devm_release_region(dev, parent, start, n); |
| 276 | } | 351 | } |
| 277 | EXPORT_SYMBOL(__wrap___devm_release_region); | 352 | EXPORT_SYMBOL(__wrap___devm_release_region); |
| 278 | 353 | ||
| 354 | acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path, | ||
| 355 | struct acpi_object_list *p, struct acpi_buffer *buf) | ||
| 356 | { | ||
| 357 | struct nfit_test_resource *nfit_res = get_nfit_res((long) handle); | ||
| 358 | union acpi_object **obj; | ||
| 359 | |||
| 360 | if (!nfit_res || strcmp(path, "_FIT") || !buf) | ||
| 361 | return acpi_evaluate_object(handle, path, p, buf); | ||
| 362 | |||
| 363 | obj = nfit_res->buf; | ||
| 364 | buf->length = sizeof(union acpi_object); | ||
| 365 | buf->pointer = *obj; | ||
| 366 | return AE_OK; | ||
| 367 | } | ||
| 368 | EXPORT_SYMBOL(__wrap_acpi_evaluate_object); | ||
| 369 | |||
| 279 | MODULE_LICENSE("GPL v2"); | 370 | MODULE_LICENSE("GPL v2"); |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index f64c57bf1d4b..c9a6458cb63e 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
| @@ -132,6 +132,8 @@ static u32 handle[NUM_DCR] = { | |||
| 132 | [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0), | 132 | [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0), |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | static unsigned long dimm_fail_cmd_flags[NUM_DCR]; | ||
| 136 | |||
| 135 | struct nfit_test { | 137 | struct nfit_test { |
| 136 | struct acpi_nfit_desc acpi_desc; | 138 | struct acpi_nfit_desc acpi_desc; |
| 137 | struct platform_device pdev; | 139 | struct platform_device pdev; |
| @@ -154,11 +156,14 @@ struct nfit_test { | |||
| 154 | int (*alloc)(struct nfit_test *t); | 156 | int (*alloc)(struct nfit_test *t); |
| 155 | void (*setup)(struct nfit_test *t); | 157 | void (*setup)(struct nfit_test *t); |
| 156 | int setup_hotplug; | 158 | int setup_hotplug; |
| 159 | union acpi_object **_fit; | ||
| 160 | dma_addr_t _fit_dma; | ||
| 157 | struct ars_state { | 161 | struct ars_state { |
| 158 | struct nd_cmd_ars_status *ars_status; | 162 | struct nd_cmd_ars_status *ars_status; |
| 159 | unsigned long deadline; | 163 | unsigned long deadline; |
| 160 | spinlock_t lock; | 164 | spinlock_t lock; |
| 161 | } ars_state; | 165 | } ars_state; |
| 166 | struct device *dimm_dev[NUM_DCR]; | ||
| 162 | }; | 167 | }; |
| 163 | 168 | ||
| 164 | static struct nfit_test *to_nfit_test(struct device *dev) | 169 | static struct nfit_test *to_nfit_test(struct device *dev) |
| @@ -411,6 +416,9 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, | |||
| 411 | if (i >= ARRAY_SIZE(handle)) | 416 | if (i >= ARRAY_SIZE(handle)) |
| 412 | return -ENXIO; | 417 | return -ENXIO; |
| 413 | 418 | ||
| 419 | if ((1 << func) & dimm_fail_cmd_flags[i]) | ||
| 420 | return -EIO; | ||
| 421 | |||
| 414 | switch (func) { | 422 | switch (func) { |
| 415 | case ND_CMD_GET_CONFIG_SIZE: | 423 | case ND_CMD_GET_CONFIG_SIZE: |
| 416 | rc = nfit_test_cmd_get_config_size(buf, buf_len); | 424 | rc = nfit_test_cmd_get_config_size(buf, buf_len); |
| @@ -428,6 +436,9 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, | |||
| 428 | break; | 436 | break; |
| 429 | case ND_CMD_SMART_THRESHOLD: | 437 | case ND_CMD_SMART_THRESHOLD: |
| 430 | rc = nfit_test_cmd_smart_threshold(buf, buf_len); | 438 | rc = nfit_test_cmd_smart_threshold(buf, buf_len); |
| 439 | device_lock(&t->pdev.dev); | ||
| 440 | __acpi_nvdimm_notify(t->dimm_dev[i], 0x81); | ||
| 441 | device_unlock(&t->pdev.dev); | ||
| 431 | break; | 442 | break; |
| 432 | default: | 443 | default: |
| 433 | return -ENOTTY; | 444 | return -ENOTTY; |
| @@ -467,14 +478,12 @@ static struct nfit_test *instances[NUM_NFITS]; | |||
| 467 | static void release_nfit_res(void *data) | 478 | static void release_nfit_res(void *data) |
| 468 | { | 479 | { |
| 469 | struct nfit_test_resource *nfit_res = data; | 480 | struct nfit_test_resource *nfit_res = data; |
| 470 | struct resource *res = nfit_res->res; | ||
| 471 | 481 | ||
| 472 | spin_lock(&nfit_test_lock); | 482 | spin_lock(&nfit_test_lock); |
| 473 | list_del(&nfit_res->list); | 483 | list_del(&nfit_res->list); |
| 474 | spin_unlock(&nfit_test_lock); | 484 | spin_unlock(&nfit_test_lock); |
| 475 | 485 | ||
| 476 | vfree(nfit_res->buf); | 486 | vfree(nfit_res->buf); |
| 477 | kfree(res); | ||
| 478 | kfree(nfit_res); | 487 | kfree(nfit_res); |
| 479 | } | 488 | } |
| 480 | 489 | ||
| @@ -482,12 +491,11 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, | |||
| 482 | void *buf) | 491 | void *buf) |
| 483 | { | 492 | { |
| 484 | struct device *dev = &t->pdev.dev; | 493 | struct device *dev = &t->pdev.dev; |
| 485 | struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL); | ||
| 486 | struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res), | 494 | struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res), |
| 487 | GFP_KERNEL); | 495 | GFP_KERNEL); |
| 488 | int rc; | 496 | int rc; |
| 489 | 497 | ||
| 490 | if (!res || !buf || !nfit_res) | 498 | if (!buf || !nfit_res) |
| 491 | goto err; | 499 | goto err; |
| 492 | rc = devm_add_action(dev, release_nfit_res, nfit_res); | 500 | rc = devm_add_action(dev, release_nfit_res, nfit_res); |
| 493 | if (rc) | 501 | if (rc) |
| @@ -496,10 +504,11 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, | |||
| 496 | memset(buf, 0, size); | 504 | memset(buf, 0, size); |
| 497 | nfit_res->dev = dev; | 505 | nfit_res->dev = dev; |
| 498 | nfit_res->buf = buf; | 506 | nfit_res->buf = buf; |
| 499 | nfit_res->res = res; | 507 | nfit_res->res.start = *dma; |
| 500 | res->start = *dma; | 508 | nfit_res->res.end = *dma + size - 1; |
| 501 | res->end = *dma + size - 1; | 509 | nfit_res->res.name = "NFIT"; |
| 502 | res->name = "NFIT"; | 510 | spin_lock_init(&nfit_res->lock); |
| 511 | INIT_LIST_HEAD(&nfit_res->requests); | ||
| 503 | spin_lock(&nfit_test_lock); | 512 | spin_lock(&nfit_test_lock); |
| 504 | list_add(&nfit_res->list, &t->resources); | 513 | list_add(&nfit_res->list, &t->resources); |
| 505 | spin_unlock(&nfit_test_lock); | 514 | spin_unlock(&nfit_test_lock); |
| @@ -508,7 +517,6 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, | |||
| 508 | err: | 517 | err: |
| 509 | if (buf) | 518 | if (buf) |
| 510 | vfree(buf); | 519 | vfree(buf); |
| 511 | kfree(res); | ||
| 512 | kfree(nfit_res); | 520 | kfree(nfit_res); |
| 513 | return NULL; | 521 | return NULL; |
| 514 | } | 522 | } |
| @@ -533,13 +541,13 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) | |||
| 533 | continue; | 541 | continue; |
| 534 | spin_lock(&nfit_test_lock); | 542 | spin_lock(&nfit_test_lock); |
| 535 | list_for_each_entry(n, &t->resources, list) { | 543 | list_for_each_entry(n, &t->resources, list) { |
| 536 | if (addr >= n->res->start && (addr < n->res->start | 544 | if (addr >= n->res.start && (addr < n->res.start |
| 537 | + resource_size(n->res))) { | 545 | + resource_size(&n->res))) { |
| 538 | nfit_res = n; | 546 | nfit_res = n; |
| 539 | break; | 547 | break; |
| 540 | } else if (addr >= (unsigned long) n->buf | 548 | } else if (addr >= (unsigned long) n->buf |
| 541 | && (addr < (unsigned long) n->buf | 549 | && (addr < (unsigned long) n->buf |
| 542 | + resource_size(n->res))) { | 550 | + resource_size(&n->res))) { |
| 543 | nfit_res = n; | 551 | nfit_res = n; |
| 544 | break; | 552 | break; |
| 545 | } | 553 | } |
| @@ -564,6 +572,86 @@ static int ars_state_init(struct device *dev, struct ars_state *ars_state) | |||
| 564 | return 0; | 572 | return 0; |
| 565 | } | 573 | } |
| 566 | 574 | ||
| 575 | static void put_dimms(void *data) | ||
| 576 | { | ||
| 577 | struct device **dimm_dev = data; | ||
| 578 | int i; | ||
| 579 | |||
| 580 | for (i = 0; i < NUM_DCR; i++) | ||
| 581 | if (dimm_dev[i]) | ||
| 582 | device_unregister(dimm_dev[i]); | ||
| 583 | } | ||
| 584 | |||
| 585 | static struct class *nfit_test_dimm; | ||
| 586 | |||
| 587 | static int dimm_name_to_id(struct device *dev) | ||
| 588 | { | ||
| 589 | int dimm; | ||
| 590 | |||
| 591 | if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 | ||
| 592 | || dimm >= NUM_DCR || dimm < 0) | ||
| 593 | return -ENXIO; | ||
| 594 | return dimm; | ||
| 595 | } | ||
| 596 | |||
| 597 | |||
| 598 | static ssize_t handle_show(struct device *dev, struct device_attribute *attr, | ||
| 599 | char *buf) | ||
| 600 | { | ||
| 601 | int dimm = dimm_name_to_id(dev); | ||
| 602 | |||
| 603 | if (dimm < 0) | ||
| 604 | return dimm; | ||
| 605 | |||
| 606 | return sprintf(buf, "%#x", handle[dimm]); | ||
| 607 | } | ||
| 608 | DEVICE_ATTR_RO(handle); | ||
| 609 | |||
| 610 | static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr, | ||
| 611 | char *buf) | ||
| 612 | { | ||
| 613 | int dimm = dimm_name_to_id(dev); | ||
| 614 | |||
| 615 | if (dimm < 0) | ||
| 616 | return dimm; | ||
| 617 | |||
| 618 | return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]); | ||
| 619 | } | ||
| 620 | |||
| 621 | static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, | ||
| 622 | const char *buf, size_t size) | ||
| 623 | { | ||
| 624 | int dimm = dimm_name_to_id(dev); | ||
| 625 | unsigned long val; | ||
| 626 | ssize_t rc; | ||
| 627 | |||
| 628 | if (dimm < 0) | ||
| 629 | return dimm; | ||
| 630 | |||
| 631 | rc = kstrtol(buf, 0, &val); | ||
| 632 | if (rc) | ||
| 633 | return rc; | ||
| 634 | |||
| 635 | dimm_fail_cmd_flags[dimm] = val; | ||
| 636 | return size; | ||
| 637 | } | ||
| 638 | static DEVICE_ATTR_RW(fail_cmd); | ||
| 639 | |||
| 640 | static struct attribute *nfit_test_dimm_attributes[] = { | ||
| 641 | &dev_attr_fail_cmd.attr, | ||
| 642 | &dev_attr_handle.attr, | ||
| 643 | NULL, | ||
| 644 | }; | ||
| 645 | |||
| 646 | static struct attribute_group nfit_test_dimm_attribute_group = { | ||
| 647 | .attrs = nfit_test_dimm_attributes, | ||
| 648 | }; | ||
| 649 | |||
| 650 | static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { | ||
| 651 | &nfit_test_dimm_attribute_group, | ||
| 652 | NULL, | ||
| 653 | }; | ||
| 654 | |||
| 567 | static int nfit_test0_alloc(struct nfit_test *t) | 655 | static int nfit_test0_alloc(struct nfit_test *t) |
| 568 | { | 656 | { |
| 569 | size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA | 657 | size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA |
| @@ -616,6 +704,21 @@ static int nfit_test0_alloc(struct nfit_test *t) | |||
| 616 | return -ENOMEM; | 704 | return -ENOMEM; |
| 617 | } | 705 | } |
| 618 | 706 | ||
| 707 | t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma); | ||
| 708 | if (!t->_fit) | ||
| 709 | return -ENOMEM; | ||
| 710 | |||
| 711 | if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) | ||
| 712 | return -ENOMEM; | ||
| 713 | for (i = 0; i < NUM_DCR; i++) { | ||
| 714 | t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, | ||
| 715 | &t->pdev.dev, 0, NULL, | ||
| 716 | nfit_test_dimm_attribute_groups, | ||
| 717 | "test_dimm%d", i); | ||
| 718 | if (!t->dimm_dev[i]) | ||
| 719 | return -ENOMEM; | ||
| 720 | } | ||
| 721 | |||
| 619 | return ars_state_init(&t->pdev.dev, &t->ars_state); | 722 | return ars_state_init(&t->pdev.dev, &t->ars_state); |
| 620 | } | 723 | } |
| 621 | 724 | ||
| @@ -1409,6 +1512,8 @@ static int nfit_test_probe(struct platform_device *pdev) | |||
| 1409 | struct acpi_nfit_desc *acpi_desc; | 1512 | struct acpi_nfit_desc *acpi_desc; |
| 1410 | struct device *dev = &pdev->dev; | 1513 | struct device *dev = &pdev->dev; |
| 1411 | struct nfit_test *nfit_test; | 1514 | struct nfit_test *nfit_test; |
| 1515 | struct nfit_mem *nfit_mem; | ||
| 1516 | union acpi_object *obj; | ||
| 1412 | int rc; | 1517 | int rc; |
| 1413 | 1518 | ||
| 1414 | nfit_test = to_nfit_test(&pdev->dev); | 1519 | nfit_test = to_nfit_test(&pdev->dev); |
| @@ -1476,14 +1581,30 @@ static int nfit_test_probe(struct platform_device *pdev) | |||
| 1476 | if (nfit_test->setup != nfit_test0_setup) | 1581 | if (nfit_test->setup != nfit_test0_setup) |
| 1477 | return 0; | 1582 | return 0; |
| 1478 | 1583 | ||
| 1479 | flush_work(&acpi_desc->work); | ||
| 1480 | nfit_test->setup_hotplug = 1; | 1584 | nfit_test->setup_hotplug = 1; |
| 1481 | nfit_test->setup(nfit_test); | 1585 | nfit_test->setup(nfit_test); |
| 1482 | 1586 | ||
| 1483 | rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf, | 1587 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
| 1484 | nfit_test->nfit_size); | 1588 | if (!obj) |
| 1485 | if (rc) | 1589 | return -ENOMEM; |
| 1486 | return rc; | 1590 | obj->type = ACPI_TYPE_BUFFER; |
| 1591 | obj->buffer.length = nfit_test->nfit_size; | ||
| 1592 | obj->buffer.pointer = nfit_test->nfit_buf; | ||
| 1593 | *(nfit_test->_fit) = obj; | ||
| 1594 | __acpi_nfit_notify(&pdev->dev, nfit_test, 0x80); | ||
| 1595 | |||
| 1596 | /* associate dimm devices with nfit_mem data for notification testing */ | ||
| 1597 | mutex_lock(&acpi_desc->init_mutex); | ||
| 1598 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | ||
| 1599 | u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle; | ||
| 1600 | int i; | ||
| 1601 | |||
| 1602 | for (i = 0; i < NUM_DCR; i++) | ||
| 1603 | if (nfit_handle == handle[i]) | ||
| 1604 | dev_set_drvdata(nfit_test->dimm_dev[i], | ||
| 1605 | nfit_mem); | ||
| 1606 | } | ||
| 1607 | mutex_unlock(&acpi_desc->init_mutex); | ||
| 1487 | 1608 | ||
| 1488 | return 0; | 1609 | return 0; |
| 1489 | } | 1610 | } |
| @@ -1518,6 +1639,10 @@ static __init int nfit_test_init(void) | |||
| 1518 | { | 1639 | { |
| 1519 | int rc, i; | 1640 | int rc, i; |
| 1520 | 1641 | ||
| 1642 | nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm"); | ||
| 1643 | if (IS_ERR(nfit_test_dimm)) | ||
| 1644 | return PTR_ERR(nfit_test_dimm); | ||
| 1645 | |||
| 1521 | nfit_test_setup(nfit_test_lookup); | 1646 | nfit_test_setup(nfit_test_lookup); |
| 1522 | 1647 | ||
| 1523 | for (i = 0; i < NUM_NFITS; i++) { | 1648 | for (i = 0; i < NUM_NFITS; i++) { |
| @@ -1584,6 +1709,7 @@ static __exit void nfit_test_exit(void) | |||
| 1584 | for (i = 0; i < NUM_NFITS; i++) | 1709 | for (i = 0; i < NUM_NFITS; i++) |
| 1585 | platform_device_unregister(&instances[i]->pdev); | 1710 | platform_device_unregister(&instances[i]->pdev); |
| 1586 | nfit_test_teardown(); | 1711 | nfit_test_teardown(); |
| 1712 | class_destroy(nfit_test_dimm); | ||
| 1587 | } | 1713 | } |
| 1588 | 1714 | ||
| 1589 | module_init(nfit_test_init); | 1715 | module_init(nfit_test_init); |
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h index 9f18e2a4a862..c281dd2e5e2d 100644 --- a/tools/testing/nvdimm/test/nfit_test.h +++ b/tools/testing/nvdimm/test/nfit_test.h | |||
| @@ -13,11 +13,21 @@ | |||
| 13 | #ifndef __NFIT_TEST_H__ | 13 | #ifndef __NFIT_TEST_H__ |
| 14 | #define __NFIT_TEST_H__ | 14 | #define __NFIT_TEST_H__ |
| 15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
| 16 | #include <linux/ioport.h> | ||
| 17 | #include <linux/spinlock_types.h> | ||
| 18 | |||
| 19 | struct nfit_test_request { | ||
| 20 | struct list_head list; | ||
| 21 | struct resource res; | ||
| 22 | }; | ||
| 16 | 23 | ||
| 17 | struct nfit_test_resource { | 24 | struct nfit_test_resource { |
| 25 | struct list_head requests; | ||
| 18 | struct list_head list; | 26 | struct list_head list; |
| 19 | struct resource *res; | 27 | struct resource res; |
| 20 | struct device *dev; | 28 | struct device *dev; |
| 29 | spinlock_t lock; | ||
| 30 | int req_count; | ||
| 21 | void *buf; | 31 | void *buf; |
| 22 | }; | 32 | }; |
| 23 | 33 | ||
