aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2016-09-22 18:42:59 -0400
committerDan Williams <dan.j.williams@intel.com>2016-10-05 23:24:18 -0400
commit8a5f50d3b7f2f601c200f84827c2c9220cd69f71 (patch)
treed1fdd4cd3751b9aecd644b9ee7d9648f8a3ad71a
parentf95b4bca9e7d29db284f9b175edf8deca1489def (diff)
libnvdimm, namespace: unify blk and pmem label scanning
In preparation for allowing multiple namespaces per pmem region, unify blk and pmem label scanning. Given that blk regions already support multiple namespaces, teaching that path how to do pmem namespace scanning is an incremental step towards multiple pmem namespace support. This should be functionally equivalent to the previous state in that it stops after finding the first valid pmem label set. Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--drivers/nvdimm/namespace_devs.c385
1 files changed, 207 insertions, 178 deletions
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 0e62f46755e7..fbcadc7cb8fd 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1550,7 +1550,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1550 u64 hw_start, hw_end, pmem_start, pmem_end; 1550 u64 hw_start, hw_end, pmem_start, pmem_end;
1551 struct nd_label_ent *label_ent; 1551 struct nd_label_ent *label_ent;
1552 1552
1553 mutex_lock(&nd_mapping->lock); 1553 WARN_ON(!mutex_is_locked(&nd_mapping->lock));
1554 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1554 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1555 nd_label = label_ent->label; 1555 nd_label = label_ent->label;
1556 if (!nd_label) 1556 if (!nd_label)
@@ -1559,7 +1559,6 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1559 break; 1559 break;
1560 nd_label = NULL; 1560 nd_label = NULL;
1561 } 1561 }
1562 mutex_unlock(&nd_mapping->lock);
1563 1562
1564 if (!nd_label) { 1563 if (!nd_label) {
1565 WARN_ON(1); 1564 WARN_ON(1);
@@ -1579,88 +1578,65 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1579 else 1578 else
1580 return -EINVAL; 1579 return -EINVAL;
1581 1580
1582 mutex_lock(&nd_mapping->lock); 1581 /* move recently validated label to the front of the list */
1583 label_ent = list_first_entry(&nd_mapping->labels, 1582 list_move(&label_ent->list, &nd_mapping->labels);
1584 typeof(*label_ent), list);
1585 label_ent->label = nd_label;
1586 list_del(&label_ent->list);
1587 nd_mapping_free_labels(nd_mapping);
1588 list_add(&label_ent->list, &nd_mapping->labels);
1589 mutex_unlock(&nd_mapping->lock);
1590 } 1583 }
1591 return 0; 1584 return 0;
1592} 1585}
1593 1586
1594/** 1587/**
1595 * find_pmem_label_set - validate interleave set labelling, retrieve label0 1588 * create_namespace_pmem - validate interleave set labelling, retrieve label0
1596 * @nd_region: region with mappings to validate 1589 * @nd_region: region with mappings to validate
1590 * @nspm: target namespace to create
1591 * @nd_label: target pmem namespace label to evaluate
1597 */ 1592 */
1598static int find_pmem_label_set(struct nd_region *nd_region, 1593struct device *create_namespace_pmem(struct nd_region *nd_region,
1599 struct nd_namespace_pmem *nspm) 1594 struct nd_namespace_label *nd_label)
1600{ 1595{
1601 u64 cookie = nd_region_interleave_set_cookie(nd_region); 1596 u64 cookie = nd_region_interleave_set_cookie(nd_region);
1602 u8 select_id[NSLABEL_UUID_LEN];
1603 struct nd_label_ent *label_ent; 1597 struct nd_label_ent *label_ent;
1598 struct nd_namespace_pmem *nspm;
1604 struct nd_mapping *nd_mapping; 1599 struct nd_mapping *nd_mapping;
1605 resource_size_t size = 0; 1600 resource_size_t size = 0;
1606 u8 *pmem_id = NULL; 1601 struct resource *res;
1602 struct device *dev;
1607 int rc = 0; 1603 int rc = 0;
1608 u16 i; 1604 u16 i;
1609 1605
1610 if (cookie == 0) { 1606 if (cookie == 0) {
1611 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n"); 1607 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1612 return -ENXIO; 1608 return ERR_PTR(-ENXIO);
1613 } 1609 }
1614 1610
1615 /* 1611 if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
1616 * Find a complete set of labels by uuid. By definition we can start 1612 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1617 * with any mapping as the reference label 1613 nd_label->uuid);
1618 */ 1614 return ERR_PTR(-EAGAIN);
1619 for (i = 0; i < nd_region->ndr_mappings; i++) {
1620 nd_mapping = &nd_region->mapping[i];
1621 mutex_lock_nested(&nd_mapping->lock, i);
1622 } 1615 }
1623 list_for_each_entry(label_ent, &nd_region->mapping[0].labels, list) {
1624 struct nd_namespace_label *nd_label = label_ent->label;
1625 1616
1626 if (!nd_label) 1617 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1627 continue; 1618 if (!nspm)
1628 if (__le64_to_cpu(nd_label->isetcookie) != cookie) 1619 return ERR_PTR(-ENOMEM);
1629 continue;
1630
1631 for (i = 0; i < nd_region->ndr_mappings; i++)
1632 if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1633 cookie, i))
1634 break;
1635 if (i < nd_region->ndr_mappings) {
1636 /*
1637 * Give up if we don't find an instance of a
1638 * uuid at each position (from 0 to
1639 * nd_region->ndr_mappings - 1), or if we find a
1640 * dimm with two instances of the same uuid.
1641 */
1642 rc = -EINVAL;
1643 break;
1644 } else if (pmem_id) {
1645 /*
1646 * If there is more than one valid uuid set, we
1647 * need userspace to clean this up.
1648 */
1649 rc = -EBUSY;
1650 break;
1651 }
1652 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1653 pmem_id = select_id;
1654 }
1655 for (i = 0; i < nd_region->ndr_mappings; i++) {
1656 int reverse = nd_region->ndr_mappings - 1 - i;
1657 1620
1658 nd_mapping = &nd_region->mapping[reverse]; 1621 dev = &nspm->nsio.common.dev;
1659 mutex_unlock(&nd_mapping->lock); 1622 dev->type = &namespace_pmem_device_type;
1660 } 1623 dev->parent = &nd_region->dev;
1624 res = &nspm->nsio.res;
1625 res->name = dev_name(&nd_region->dev);
1626 res->flags = IORESOURCE_MEM;
1661 1627
1662 if (rc) 1628 for (i = 0; i < nd_region->ndr_mappings; i++)
1629 if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1630 break;
1631 if (i < nd_region->ndr_mappings) {
1632 /*
1633 * Give up if we don't find an instance of a uuid at each
1634 * position (from 0 to nd_region->ndr_mappings - 1), or if we
1635 * find a dimm with two instances of the same uuid.
1636 */
1637 rc = -EINVAL;
1663 goto err; 1638 goto err;
1639 }
1664 1640
1665 /* 1641 /*
1666 * Fix up each mapping's 'labels' to have the validated pmem label for 1642 * Fix up each mapping's 'labels' to have the validated pmem label for
@@ -1670,7 +1646,7 @@ static int find_pmem_label_set(struct nd_region *nd_region,
1670 * the dimm being enabled (i.e. nd_label_reserve_dpa() 1646 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1671 * succeeded). 1647 * succeeded).
1672 */ 1648 */
1673 rc = select_pmem_id(nd_region, pmem_id); 1649 rc = select_pmem_id(nd_region, nd_label->uuid);
1674 if (rc) 1650 if (rc)
1675 goto err; 1651 goto err;
1676 1652
@@ -1679,11 +1655,9 @@ static int find_pmem_label_set(struct nd_region *nd_region,
1679 struct nd_namespace_label *label0; 1655 struct nd_namespace_label *label0;
1680 1656
1681 nd_mapping = &nd_region->mapping[i]; 1657 nd_mapping = &nd_region->mapping[i];
1682 mutex_lock(&nd_mapping->lock);
1683 label_ent = list_first_entry_or_null(&nd_mapping->labels, 1658 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1684 typeof(*label_ent), list); 1659 typeof(*label_ent), list);
1685 label0 = label_ent ? label_ent->label : 0; 1660 label0 = label_ent ? label_ent->label : 0;
1686 mutex_unlock(&nd_mapping->lock);
1687 1661
1688 if (!label0) { 1662 if (!label0) {
1689 WARN_ON(1); 1663 WARN_ON(1);
@@ -1707,8 +1681,9 @@ static int find_pmem_label_set(struct nd_region *nd_region,
1707 1681
1708 nd_namespace_pmem_set_size(nd_region, nspm, size); 1682 nd_namespace_pmem_set_size(nd_region, nspm, size);
1709 1683
1710 return 0; 1684 return dev;
1711 err: 1685 err:
1686 namespace_pmem_release(dev);
1712 switch (rc) { 1687 switch (rc) {
1713 case -EINVAL: 1688 case -EINVAL:
1714 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__); 1689 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
@@ -1721,56 +1696,7 @@ static int find_pmem_label_set(struct nd_region *nd_region,
1721 __func__, rc); 1696 __func__, rc);
1722 break; 1697 break;
1723 } 1698 }
1724 return rc; 1699 return ERR_PTR(rc);
1725}
1726
1727static struct device **create_namespace_pmem(struct nd_region *nd_region)
1728{
1729 struct nd_namespace_pmem *nspm;
1730 struct device *dev, **devs;
1731 struct resource *res;
1732 int rc;
1733
1734 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1735 if (!nspm)
1736 return NULL;
1737
1738 dev = &nspm->nsio.common.dev;
1739 dev->type = &namespace_pmem_device_type;
1740 dev->parent = &nd_region->dev;
1741 res = &nspm->nsio.res;
1742 res->name = dev_name(&nd_region->dev);
1743 res->flags = IORESOURCE_MEM;
1744 rc = find_pmem_label_set(nd_region, nspm);
1745 if (rc == -ENODEV) {
1746 int i;
1747
1748 /* Pass, try to permit namespace creation... */
1749 for (i = 0; i < nd_region->ndr_mappings; i++) {
1750 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1751
1752 mutex_lock(&nd_mapping->lock);
1753 nd_mapping_free_labels(nd_mapping);
1754 mutex_unlock(&nd_mapping->lock);
1755 }
1756
1757 /* Publish a zero-sized namespace for userspace to configure. */
1758 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1759
1760 rc = 0;
1761 } else if (rc)
1762 goto err;
1763
1764 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1765 if (!devs)
1766 goto err;
1767
1768 devs[0] = dev;
1769 return devs;
1770
1771 err:
1772 namespace_pmem_release(&nspm->nsio.common.dev);
1773 return NULL;
1774} 1700}
1775 1701
1776struct resource *nsblk_add_resource(struct nd_region *nd_region, 1702struct resource *nsblk_add_resource(struct nd_region *nd_region,
@@ -1872,43 +1798,107 @@ void nd_region_create_btt_seed(struct nd_region *nd_region)
1872 dev_err(&nd_region->dev, "failed to create btt namespace\n"); 1798 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1873} 1799}
1874 1800
1875static struct device **scan_labels(struct nd_region *nd_region, 1801static int add_namespace_resource(struct nd_region *nd_region,
1876 struct nd_mapping *nd_mapping) 1802 struct nd_namespace_label *nd_label, struct device **devs,
1803 int count)
1877{ 1804{
1805 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1806 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1807 int i;
1808
1809 for (i = 0; i < count; i++) {
1810 u8 *uuid = namespace_to_uuid(devs[i]);
1811 struct resource *res;
1812
1813 if (IS_ERR_OR_NULL(uuid)) {
1814 WARN_ON(1);
1815 continue;
1816 }
1817
1818 if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
1819 continue;
1820 if (is_namespace_blk(devs[i])) {
1821 res = nsblk_add_resource(nd_region, ndd,
1822 to_nd_namespace_blk(devs[i]),
1823 __le64_to_cpu(nd_label->dpa));
1824 if (!res)
1825 return -ENXIO;
1826 nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
1827 } else {
1828 dev_err(&nd_region->dev,
1829 "error: conflicting extents for uuid: %pUb\n",
1830 nd_label->uuid);
1831 return -ENXIO;
1832 }
1833 break;
1834 }
1835
1836 return i;
1837}
1838
1839struct device *create_namespace_blk(struct nd_region *nd_region,
1840 struct nd_namespace_label *nd_label, int count)
1841{
1842
1843 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1878 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1844 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1879 struct device *dev, **devs = NULL;
1880 struct nd_namespace_blk *nsblk; 1845 struct nd_namespace_blk *nsblk;
1881 struct nd_label_ent *label_ent; 1846 char *name[NSLABEL_NAME_LEN];
1847 struct device *dev = NULL;
1848 struct resource *res;
1849
1850 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1851 if (!nsblk)
1852 return ERR_PTR(-ENOMEM);
1853 dev = &nsblk->common.dev;
1854 dev->type = &namespace_blk_device_type;
1855 dev->parent = &nd_region->dev;
1856 nsblk->id = -1;
1857 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1858 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1859 GFP_KERNEL);
1860 if (!nsblk->uuid)
1861 goto blk_err;
1862 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1863 if (name[0])
1864 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1865 GFP_KERNEL);
1866 res = nsblk_add_resource(nd_region, ndd, nsblk,
1867 __le64_to_cpu(nd_label->dpa));
1868 if (!res)
1869 goto blk_err;
1870 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
1871 return dev;
1872 blk_err:
1873 namespace_blk_release(dev);
1874 return ERR_PTR(-ENXIO);
1875}
1876
1877static struct device **scan_labels(struct nd_region *nd_region)
1878{
1879 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1880 struct device *dev, **devs = NULL;
1881 struct nd_label_ent *label_ent, *e;
1882 int i, count = 0; 1882 int i, count = 0;
1883 1883
1884 list_for_each_entry(label_ent, &nd_mapping->labels, list) { 1884 /* "safe" because create_namespace_pmem() might list_move() label_ent */
1885 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1885 struct nd_namespace_label *nd_label = label_ent->label; 1886 struct nd_namespace_label *nd_label = label_ent->label;
1886 char *name[NSLABEL_NAME_LEN];
1887 struct device **__devs; 1887 struct device **__devs;
1888 struct resource *res;
1889 u32 flags; 1888 u32 flags;
1890 1889
1891 if (!nd_label) 1890 if (!nd_label)
1892 continue; 1891 continue;
1893 flags = __le32_to_cpu(nd_label->flags); 1892 flags = __le32_to_cpu(nd_label->flags);
1894 if (flags & NSLABEL_FLAG_LOCAL) 1893 if (is_nd_blk(&nd_region->dev)
1895 /* pass */; 1894 == !!(flags & NSLABEL_FLAG_LOCAL))
1895 /* pass, region matches label type */;
1896 else 1896 else
1897 continue; 1897 continue;
1898 1898
1899 for (i = 0; i < count; i++) { 1899 i = add_namespace_resource(nd_region, nd_label, devs, count);
1900 nsblk = to_nd_namespace_blk(devs[i]); 1900 if (i < 0)
1901 if (memcmp(nsblk->uuid, nd_label->uuid, 1901 goto err;
1902 NSLABEL_UUID_LEN) == 0) {
1903 res = nsblk_add_resource(nd_region, ndd, nsblk,
1904 __le64_to_cpu(nd_label->dpa));
1905 if (!res)
1906 goto err;
1907 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1908 dev_name(&nsblk->common.dev));
1909 break;
1910 }
1911 }
1912 if (i < count) 1902 if (i < count)
1913 continue; 1903 continue;
1914 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); 1904 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
@@ -1918,34 +1908,35 @@ static struct device **scan_labels(struct nd_region *nd_region,
1918 kfree(devs); 1908 kfree(devs);
1919 devs = __devs; 1909 devs = __devs;
1920 1910
1921 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); 1911 if (is_nd_blk(&nd_region->dev)) {
1922 if (!nsblk) 1912 dev = create_namespace_blk(nd_region, nd_label, count);
1923 goto err; 1913 if (IS_ERR(dev))
1924 dev = &nsblk->common.dev; 1914 goto err;
1925 dev->type = &namespace_blk_device_type; 1915 devs[count++] = dev;
1926 dev->parent = &nd_region->dev; 1916 } else {
1927 dev_set_name(dev, "namespace%d.%d", nd_region->id, count); 1917 dev = create_namespace_pmem(nd_region, nd_label);
1928 devs[count++] = dev; 1918 if (IS_ERR(dev)) {
1929 nsblk->id = -1; 1919 switch (PTR_ERR(dev)) {
1930 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); 1920 case -EAGAIN:
1931 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, 1921 /* skip invalid labels */
1932 GFP_KERNEL); 1922 continue;
1933 if (!nsblk->uuid) 1923 case -ENODEV:
1934 goto err; 1924 /* fallthrough to seed creation */
1935 memcpy(name, nd_label->name, NSLABEL_NAME_LEN); 1925 break;
1936 if (name[0]) 1926 default:
1937 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, 1927 goto err;
1938 GFP_KERNEL); 1928 }
1939 res = nsblk_add_resource(nd_region, ndd, nsblk, 1929 } else
1940 __le64_to_cpu(nd_label->dpa)); 1930 devs[count++] = dev;
1941 if (!res) 1931
1942 goto err; 1932 /* we only expect one valid pmem label set per region */
1943 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n", 1933 break;
1944 dev_name(&nsblk->common.dev)); 1934 }
1945 } 1935 }
1946 1936
1947 dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n", 1937 dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
1948 __func__, count, count == 1 ? "" : "s"); 1938 __func__, count, is_nd_blk(&nd_region->dev)
1939 ? "blk" : "pmem", count == 1 ? "" : "s");
1949 1940
1950 if (count == 0) { 1941 if (count == 0) {
1951 /* Publish a zero-sized namespace for userspace to configure. */ 1942 /* Publish a zero-sized namespace for userspace to configure. */
@@ -1954,37 +1945,77 @@ static struct device **scan_labels(struct nd_region *nd_region,
1954 devs = kcalloc(2, sizeof(dev), GFP_KERNEL); 1945 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1955 if (!devs) 1946 if (!devs)
1956 goto err; 1947 goto err;
1957 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); 1948 if (is_nd_blk(&nd_region->dev)) {
1958 if (!nsblk) 1949 struct nd_namespace_blk *nsblk;
1959 goto err; 1950
1960 dev = &nsblk->common.dev; 1951 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1961 dev->type = &namespace_blk_device_type; 1952 if (!nsblk)
1953 goto err;
1954 dev = &nsblk->common.dev;
1955 dev->type = &namespace_blk_device_type;
1956 } else {
1957 struct nd_namespace_pmem *nspm;
1958
1959 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1960 if (!nspm)
1961 goto err;
1962 dev = &nspm->nsio.common.dev;
1963 dev->type = &namespace_pmem_device_type;
1964 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1965 }
1962 dev->parent = &nd_region->dev; 1966 dev->parent = &nd_region->dev;
1963 devs[count++] = dev; 1967 devs[count++] = dev;
1968 } else if (is_nd_pmem(&nd_region->dev)) {
1969 /* clean unselected labels */
1970 for (i = 0; i < nd_region->ndr_mappings; i++) {
1971 nd_mapping = &nd_region->mapping[i];
1972 if (list_empty(&nd_mapping->labels)) {
1973 WARN_ON(1);
1974 continue;
1975 }
1976 label_ent = list_first_entry(&nd_mapping->labels,
1977 typeof(*label_ent), list);
1978 list_del(&label_ent->list);
1979 nd_mapping_free_labels(nd_mapping);
1980 list_add(&label_ent->list, &nd_mapping->labels);
1981 }
1964 } 1982 }
1965 1983
1966 return devs; 1984 return devs;
1967 1985
1968 err: 1986 err:
1969 for (i = 0; devs[i]; i++) { 1987 for (i = 0; devs[i]; i++)
1970 nsblk = to_nd_namespace_blk(devs[i]); 1988 if (is_nd_blk(&nd_region->dev))
1971 namespace_blk_release(&nsblk->common.dev); 1989 namespace_blk_release(devs[i]);
1972 } 1990 else
1991 namespace_pmem_release(devs[i]);
1973 kfree(devs); 1992 kfree(devs);
1974 return NULL; 1993 return NULL;
1975} 1994}
1976 1995
1977static struct device **create_namespace_blk(struct nd_region *nd_region) 1996static struct device **create_namespaces(struct nd_region *nd_region)
1978{ 1997{
1979 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 1998 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1980 struct device **devs; 1999 struct device **devs;
2000 int i;
1981 2001
1982 if (nd_region->ndr_mappings == 0) 2002 if (nd_region->ndr_mappings == 0)
1983 return NULL; 2003 return NULL;
1984 2004
1985 mutex_lock(&nd_mapping->lock); 2005 /* lock down all mappings while we scan labels */
1986 devs = scan_labels(nd_region, nd_mapping); 2006 for (i = 0; i < nd_region->ndr_mappings; i++) {
1987 mutex_unlock(&nd_mapping->lock); 2007 nd_mapping = &nd_region->mapping[i];
2008 mutex_lock_nested(&nd_mapping->lock, i);
2009 }
2010
2011 devs = scan_labels(nd_region);
2012
2013 for (i = 0; i < nd_region->ndr_mappings; i++) {
2014 int reverse = nd_region->ndr_mappings - 1 - i;
2015
2016 nd_mapping = &nd_region->mapping[reverse];
2017 mutex_unlock(&nd_mapping->lock);
2018 }
1988 2019
1989 return devs; 2020 return devs;
1990} 2021}
@@ -2064,10 +2095,8 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2064 devs = create_namespace_io(nd_region); 2095 devs = create_namespace_io(nd_region);
2065 break; 2096 break;
2066 case ND_DEVICE_NAMESPACE_PMEM: 2097 case ND_DEVICE_NAMESPACE_PMEM:
2067 devs = create_namespace_pmem(nd_region);
2068 break;
2069 case ND_DEVICE_NAMESPACE_BLK: 2098 case ND_DEVICE_NAMESPACE_BLK:
2070 devs = create_namespace_blk(nd_region); 2099 devs = create_namespaces(nd_region);
2071 break; 2100 break;
2072 default: 2101 default:
2073 break; 2102 break;