aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHannes Reinecke <hare@suse.de>2013-12-17 03:18:50 -0500
committerNicholas Bellinger <nab@linux-iscsi.org>2014-01-10 00:48:35 -0500
commit229d4f112fd6d1562b6d5324c4cb8f8d097bac54 (patch)
tree60319c1326b6a7b5e77ea34d85e8cf53f10cadce
parentc66094bf325ee406b92298d73089ee25484a0263 (diff)
target_core_alua: Referrals configfs integration
Referrals need an LBA map, which needs to be kept consistent across all target port groups. So instead of tying the map to the target port groups I've implemented a single attribute containing the entire map.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r-- drivers/target/target_core_alua.c      | 101
-rw-r--r-- drivers/target/target_core_alua.h      |   8
-rw-r--r-- drivers/target/target_core_configfs.c  | 171
-rw-r--r-- drivers/target/target_core_device.c    |   1
-rw-r--r-- drivers/target/target_core_transport.c |  28
5 files changed, 308 insertions(+), 1 deletion(-)
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 0843c8f4b94e..e73edcad7930 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1351,6 +1351,107 @@ static int core_alua_set_tg_pt_secondary_state(
1351 return 0; 1351 return 0;
1352} 1352}
1353 1353
/*
 * core_alua_allocate_lba_map - allocate one LBA map segment and queue it.
 * @list: list the new segment is appended to (caller-owned staging list)
 * @first_lba: first LBA covered by this segment
 * @last_lba: last LBA covered by this segment
 *
 * The segment's member list is initialised empty and is populated later
 * via core_alua_allocate_lba_map_mem().
 *
 * Returns the new map segment, or ERR_PTR(-ENOMEM) on allocation
 * failure — callers must check with IS_ERR(), not for NULL.
 */
1354struct t10_alua_lba_map *
1355core_alua_allocate_lba_map(struct list_head *list,
1356 u64 first_lba, u64 last_lba)
1357{
1358 struct t10_alua_lba_map *lba_map;
1359
1360 lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1361 if (!lba_map) {
1362 pr_err("Unable to allocate struct t10_alua_lba_map\n");
1363 return ERR_PTR(-ENOMEM);
1364 }
1365 INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1366 lba_map->lba_map_first_lba = first_lba;
1367 lba_map->lba_map_last_lba = last_lba;
1368
/* Appended, so segments keep the order in which they were parsed. */
1369 list_add_tail(&lba_map->lba_map_list, list);
1370 return lba_map;
1371}
1372
/*
 * core_alua_allocate_lba_map_mem - add one per-port-group member to a segment.
 * @lba_map: the LBA map segment being populated
 * @pg_id: target port group id this member describes
 * @state: ALUA access state of @pg_id for this LBA range
 *
 * Rejects a @pg_id that already appears in the segment's member list.
 *
 * Returns 0 on success, -EINVAL on duplicate pg_id, -ENOMEM on
 * allocation failure.
 */
1373int
1374core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1375 int pg_id, int state)
1376{
1377 struct t10_alua_lba_map_member *lba_map_mem;
1378
/* Linear duplicate scan; member lists are expected to be short. */
1379 list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1380 lba_map_mem_list) {
1381 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1382 pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1383 return -EINVAL;
1384 }
1385 }
1386
1387 lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1388 if (!lba_map_mem) {
1389 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1390 return -ENOMEM;
1391 }
1392 lba_map_mem->lba_map_mem_alua_state = state;
1393 lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1394
1395 list_add_tail(&lba_map_mem->lba_map_mem_list,
1396 &lba_map->lba_map_mem_list);
1397 return 0;
1398}
1399
/*
 * core_alua_free_lba_map - free every segment (and its members) on a list.
 * @lba_list: list of t10_alua_lba_map segments; empty on return
 *
 * Caller must ensure the list is no longer reachable by readers (e.g.
 * already spliced off the device under lba_map_lock) — no locking is
 * taken here.
 */
1400void
1401core_alua_free_lba_map(struct list_head *lba_list)
1402{
1403 struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1404 struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1405
/* _safe iteration: entries are deleted while walking both lists. */
1406 list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1407 lba_map_list) {
1408 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1409 &lba_map->lba_map_mem_list,
1410 lba_map_mem_list) {
1411 list_del(&lba_map_mem->lba_map_mem_list);
1412 kmem_cache_free(t10_alua_lba_map_mem_cache,
1413 lba_map_mem);
1414 }
1415 list_del(&lba_map->lba_map_list);
1416 kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1417 }
1418}
1419
/*
 * core_alua_set_lba_map - install (or clear) the device-wide LBA map.
 * @dev: device whose referrals map is replaced
 * @lba_map_list: new segment list to splice in, or NULL to clear the map
 * @segment_size: referrals segment size to record on the device
 * @segment_mult: referrals segment multiplier to record on the device
 *
 * Swaps the old map out under lba_map_lock, then walks all valid target
 * port groups under tg_pt_gps_lock, setting or clearing ALUA_LBD_SUP
 * depending on whether a map is now active. The old map is freed only
 * after both locks are dropped. Passing NULL (as target_free_device()
 * does) clears the map and drops LBD support from every group.
 */
1420void
1421core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1422 int segment_size, int segment_mult)
1423{
1424 struct list_head old_lba_map_list;
1425 struct t10_alua_tg_pt_gp *tg_pt_gp;
1426 int activate = 0, supported;
1427
1428 INIT_LIST_HEAD(&old_lba_map_list);
1429 spin_lock(&dev->t10_alua.lba_map_lock);
1430 dev->t10_alua.lba_map_segment_size = segment_size;
1431 dev->t10_alua.lba_map_segment_multiplier = segment_mult;
/* Detach the current map first so it can be freed outside the lock. */
1432 list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1433 if (lba_map_list) {
1434 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1435 activate = 1;
1436 }
1437 spin_unlock(&dev->t10_alua.lba_map_lock);
1438 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1439 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1440 tg_pt_gp_list) {
1441
/* Skip groups that never received a valid tg_pt_gp id. */
1442 if (!tg_pt_gp->tg_pt_gp_valid_id)
1443 continue;
1444 supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1445 if (activate)
1446 supported |= ALUA_LBD_SUP;
1447 else
1448 supported &= ~ALUA_LBD_SUP;
1449 tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1450 }
1451 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/* Old map is private now; safe to free without holding any lock. */
1452 core_alua_free_lba_map(&old_lba_map_list);
1453}
1454
1354struct t10_alua_lu_gp * 1455struct t10_alua_lu_gp *
1355core_alua_allocate_lu_gp(const char *name, int def_group) 1456core_alua_allocate_lu_gp(const char *name, int def_group)
1356{ 1457{
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 47950cdc6f8b..0a7d65e80404 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -86,6 +86,8 @@ extern struct kmem_cache *t10_alua_lu_gp_cache;
86extern struct kmem_cache *t10_alua_lu_gp_mem_cache; 86extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
87extern struct kmem_cache *t10_alua_tg_pt_gp_cache; 87extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
88extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 88extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
89extern struct kmem_cache *t10_alua_lba_map_cache;
90extern struct kmem_cache *t10_alua_lba_map_mem_cache;
89 91
90extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); 92extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
91extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); 93extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
@@ -95,6 +97,12 @@ extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
95 struct se_device *, struct se_port *, 97 struct se_device *, struct se_port *,
96 struct se_node_acl *, int, int); 98 struct se_node_acl *, int, int);
97extern char *core_alua_dump_status(int); 99extern char *core_alua_dump_status(int);
100extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
101 struct list_head *, u64, u64);
102extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
103extern void core_alua_free_lba_map(struct list_head *);
104extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
105 int, int);
98extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); 106extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
99extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); 107extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
100extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); 108extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0a47f524700..5cf613544eeb 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1741,6 +1741,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
1741 .store = target_core_store_alua_lu_gp, 1741 .store = target_core_store_alua_lu_gp,
1742}; 1742};
1743 1743
/*
 * target_core_show_dev_lba_map - configfs "lba_map" show handler.
 *
 * Emits the map in the same text format the store handler parses:
 * a "segment_size segment_multiplier" header line, then one line per
 * segment: "first_lba last_lba pg_id:state ...", where state is
 * O/A/S/U (active-optimized / active-non-optimized / standby /
 * unavailable) or '.' for anything unrecognised. Empty map -> empty
 * output.
 *
 * NOTE(review): sprintf into @page assumes the whole map fits in the
 * configfs PAGE_SIZE buffer — confirm an upper bound on map size.
 */
1744static ssize_t target_core_show_dev_lba_map(void *p, char *page)
1745{
1746 struct se_device *dev = p;
1747 struct t10_alua_lba_map *map;
1748 struct t10_alua_lba_map_member *mem;
1749 char *b = page;
1750 int bl = 0;
1751 char state;
1752
/* Lock out concurrent core_alua_set_lba_map() while walking the map. */
1753 spin_lock(&dev->t10_alua.lba_map_lock);
1754 if (!list_empty(&dev->t10_alua.lba_map_list))
1755 bl += sprintf(b + bl, "%u %u\n",
1756 dev->t10_alua.lba_map_segment_size,
1757 dev->t10_alua.lba_map_segment_multiplier);
1758 list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
1759 bl += sprintf(b + bl, "%llu %llu",
1760 map->lba_map_first_lba, map->lba_map_last_lba);
1761 list_for_each_entry(mem, &map->lba_map_mem_list,
1762 lba_map_mem_list) {
1763 switch (mem->lba_map_mem_alua_state) {
1764 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
1765 state = 'O';
1766 break;
1767 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
1768 state = 'A';
1769 break;
1770 case ALUA_ACCESS_STATE_STANDBY:
1771 state = 'S';
1772 break;
1773 case ALUA_ACCESS_STATE_UNAVAILABLE:
1774 state = 'U';
1775 break;
1776 default:
1777 state = '.';
1778 break;
1779 }
1780 bl += sprintf(b + bl, " %d:%c",
1781 mem->lba_map_mem_alua_pg_id, state);
1782 }
1783 bl += sprintf(b + bl, "\n");
1784 }
1785 spin_unlock(&dev->t10_alua.lba_map_lock);
1786 return bl;
1787}
1788
/*
 * target_core_store_dev_lba_map - configfs "lba_map" store handler.
 *
 * Parses the map in the format produced by the show handler:
 *   line 0: "<segment_size> <segment_multiplier>"
 *   line N: "<start_lba> <end_lba> <pg_id>:<state> [<pg_id>:<state> ...]"
 * with state one of O/A/S/U. Every data line must name the same number
 * of port groups. On success the staged list is installed atomically
 * via core_alua_set_lba_map(); on any parse error the staged list is
 * freed and the negative errno is returned.
 *
 * NOTE(review): strsep() advances map_entries past each token, so the
 * final kfree(map_entries) frees NULL (leaking the kstrdup buffer) on
 * full consumption, or a mid-buffer pointer on early break — the
 * original kstrdup() return value should be saved and freed instead.
 * Confirm against the later upstream fix.
 *
 * NOTE(review): if core_alua_allocate_lba_map_mem() fails but the
 * per-line group count still matches pg_num, the outer loop keeps
 * going and a later successful line overwrites ret, so the error can
 * be silently lost and an incomplete map activated — verify.
 */
1789static ssize_t target_core_store_dev_lba_map(
1790 void *p,
1791 const char *page,
1792 size_t count)
1793{
1794 struct se_device *dev = p;
1795 struct t10_alua_lba_map *lba_map = NULL;
1796 struct list_head lba_list;
1797 char *map_entries, *ptr;
1798 char state;
1799 int pg_num = -1, pg;
1800 int ret = 0, num = 0, pg_id, alua_state;
1801 unsigned long start_lba = -1, end_lba = -1;
1802 unsigned long segment_size = -1, segment_mult = -1;
1803
/* Writable copy: strsep() below inserts NULs into the buffer. */
1804 map_entries = kstrdup(page, GFP_KERNEL);
1805 if (!map_entries)
1806 return -ENOMEM;
1807
1808 INIT_LIST_HEAD(&lba_list);
1809 while ((ptr = strsep(&map_entries, "\n")) != NULL) {
1810 if (!*ptr)
1811 continue;
1812
/* First non-empty line carries the segment size/multiplier header. */
1813 if (num == 0) {
1814 if (sscanf(ptr, "%lu %lu\n",
1815 &segment_size, &segment_mult) != 2) {
1816 pr_err("Invalid line %d\n", num);
1817 ret = -EINVAL;
1818 break;
1819 }
1820 num++;
1821 continue;
1822 }
1823 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
1824 pr_err("Invalid line %d\n", num);
1825 ret = -EINVAL;
1826 break;
1827 }
/* Step ptr past the two LBA fields to the pg_id:state descriptors. */
1828 ptr = strchr(ptr, ' ');
1829 if (!ptr) {
1830 pr_err("Invalid line %d, missing end lba\n", num);
1831 ret = -EINVAL;
1832 break;
1833 }
1834 ptr++;
1835 ptr = strchr(ptr, ' ');
1836 if (!ptr) {
1837 pr_err("Invalid line %d, missing state definitions\n",
1838 num);
1839 ret = -EINVAL;
1840 break;
1841 }
1842 ptr++;
1843 lba_map = core_alua_allocate_lba_map(&lba_list,
1844 start_lba, end_lba);
1845 if (IS_ERR(lba_map)) {
1846 ret = PTR_ERR(lba_map);
1847 break;
1848 }
1849 pg = 0;
1850 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
1851 switch (state) {
1852 case 'O':
1853 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1854 break;
1855 case 'A':
1856 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
1857 break;
1858 case 'S':
1859 alua_state = ALUA_ACCESS_STATE_STANDBY;
1860 break;
1861 case 'U':
1862 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
1863 break;
1864 default:
1865 pr_err("Invalid ALUA state '%c'\n", state);
1866 ret = -EINVAL;
1867 goto out;
1868 }
1869
1870 ret = core_alua_allocate_lba_map_mem(lba_map,
1871 pg_id, alua_state);
1872 if (ret) {
1873 pr_err("Invalid target descriptor %d:%c "
1874 "at line %d\n",
1875 pg_id, state, num);
1876 break;
1877 }
1878 pg++;
/* Advance to the next space-separated pg_id:state descriptor. */
1879 ptr = strchr(ptr, ' ');
1880 if (ptr)
1881 ptr++;
1882 else
1883 break;
1884 }
/* First data line fixes the group count all later lines must match. */
1885 if (pg_num == -1)
1886 pg_num = pg;
1887 else if (pg != pg_num) {
1888 pr_err("Only %d from %d port groups definitions "
1889 "at line %d\n", pg, pg_num, num);
1890 ret = -EINVAL;
1891 break;
1892 }
1893 num++;
1894 }
1895out:
1896 if (ret) {
1897 core_alua_free_lba_map(&lba_list);
/* Negative errno stored into size_t count; returned as ssize_t. */
1898 count = ret;
1899 } else
1900 core_alua_set_lba_map(dev, &lba_list,
1901 segment_size, segment_mult);
1902 kfree(map_entries);
1903 return count;
1904}
1905
/*
 * "lba_map" device attribute: world-readable, owner-writable configfs
 * file wired to the show/store handlers above.
 */
1906static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
1907 .attr = { .ca_owner = THIS_MODULE,
1908 .ca_name = "lba_map",
1909 .ca_mode = S_IRUGO | S_IWUSR },
1910 .show = target_core_show_dev_lba_map,
1911 .store = target_core_store_dev_lba_map,
1912};
1913
1744static struct configfs_attribute *lio_core_dev_attrs[] = { 1914static struct configfs_attribute *lio_core_dev_attrs[] = {
1745 &target_core_attr_dev_info.attr, 1915 &target_core_attr_dev_info.attr,
1746 &target_core_attr_dev_control.attr, 1916 &target_core_attr_dev_control.attr,
@@ -1748,6 +1918,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
1748 &target_core_attr_dev_udev_path.attr, 1918 &target_core_attr_dev_udev_path.attr,
1749 &target_core_attr_dev_enable.attr, 1919 &target_core_attr_dev_enable.attr,
1750 &target_core_attr_dev_alua_lu_gp.attr, 1920 &target_core_attr_dev_alua_lu_gp.attr,
1921 &target_core_attr_dev_lba_map.attr,
1751 NULL, 1922 NULL,
1752}; 1923};
1753 1924
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 88b4fb2f6e1a..32440584ebbb 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1585,6 +1585,7 @@ void target_free_device(struct se_device *dev)
1585 } 1585 }
1586 1586
1587 core_alua_free_lu_gp_mem(dev); 1587 core_alua_free_lu_gp_mem(dev);
1588 core_alua_set_lba_map(dev, NULL, 0, 0);
1588 core_scsi3_free_all_registrations(dev); 1589 core_scsi3_free_all_registrations(dev);
1589 se_release_vpd_for_dev(dev); 1590 se_release_vpd_for_dev(dev);
1590 1591
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 91953da0f623..18c828dc3b9e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
62struct kmem_cache *t10_alua_lu_gp_mem_cache; 62struct kmem_cache *t10_alua_lu_gp_mem_cache;
63struct kmem_cache *t10_alua_tg_pt_gp_cache; 63struct kmem_cache *t10_alua_tg_pt_gp_cache;
64struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 64struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
65struct kmem_cache *t10_alua_lba_map_cache;
66struct kmem_cache *t10_alua_lba_map_mem_cache;
65 67
66static void transport_complete_task_attr(struct se_cmd *cmd); 68static void transport_complete_task_attr(struct se_cmd *cmd);
67static void transport_handle_queue_full(struct se_cmd *cmd, 69static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -128,14 +130,36 @@ int init_se_kmem_caches(void)
128 "mem_t failed\n"); 130 "mem_t failed\n");
129 goto out_free_tg_pt_gp_cache; 131 goto out_free_tg_pt_gp_cache;
130 } 132 }
133 t10_alua_lba_map_cache = kmem_cache_create(
134 "t10_alua_lba_map_cache",
135 sizeof(struct t10_alua_lba_map),
136 __alignof__(struct t10_alua_lba_map), 0, NULL);
137 if (!t10_alua_lba_map_cache) {
138 pr_err("kmem_cache_create() for t10_alua_lba_map_"
139 "cache failed\n");
140 goto out_free_tg_pt_gp_mem_cache;
141 }
142 t10_alua_lba_map_mem_cache = kmem_cache_create(
143 "t10_alua_lba_map_mem_cache",
144 sizeof(struct t10_alua_lba_map_member),
145 __alignof__(struct t10_alua_lba_map_member), 0, NULL);
146 if (!t10_alua_lba_map_mem_cache) {
147 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
148 "cache failed\n");
149 goto out_free_lba_map_cache;
150 }
131 151
132 target_completion_wq = alloc_workqueue("target_completion", 152 target_completion_wq = alloc_workqueue("target_completion",
133 WQ_MEM_RECLAIM, 0); 153 WQ_MEM_RECLAIM, 0);
134 if (!target_completion_wq) 154 if (!target_completion_wq)
135 goto out_free_tg_pt_gp_mem_cache; 155 goto out_free_lba_map_mem_cache;
136 156
137 return 0; 157 return 0;
138 158
159out_free_lba_map_mem_cache:
160 kmem_cache_destroy(t10_alua_lba_map_mem_cache);
161out_free_lba_map_cache:
162 kmem_cache_destroy(t10_alua_lba_map_cache);
139out_free_tg_pt_gp_mem_cache: 163out_free_tg_pt_gp_mem_cache:
140 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 164 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
141out_free_tg_pt_gp_cache: 165out_free_tg_pt_gp_cache:
@@ -164,6 +188,8 @@ void release_se_kmem_caches(void)
164 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 188 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 189 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
166 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 190 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
191 kmem_cache_destroy(t10_alua_lba_map_cache);
192 kmem_cache_destroy(t10_alua_lba_map_mem_cache);
167} 193}
168 194
169/* This code ensures unique mib indexes are handed out. */ 195/* This code ensures unique mib indexes are handed out. */