Diffstat (limited to 'drivers/target/target_core_tpg.c')
 -rw-r--r--  drivers/target/target_core_tpg.c | 577
 1 file changed, 211 insertions(+), 366 deletions(-)
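The diff below replaces the fixed per-TPG array of se_lun entries (guarded by the tpg_lun_lock spinlock) with a dynamically allocated, RCU-protected hlist guarded by tpg_lun_mutex, and converts acl_node_lock to acl_node_mutex. As a minimal standalone sketch of the reader/writer pattern being adopted (this is not code from the commit; the demo_* names are made up for illustration):

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_lun {
	u64 unpacked_lun;
	struct hlist_node link;
	struct rcu_head rcu_head;
};

static HLIST_HEAD(demo_lun_hlist);
static DEFINE_MUTEX(demo_lun_mutex);

/* Reader side: walk the hlist locklessly under rcu_read_lock(). */
static bool demo_lun_exists(u64 unpacked_lun)
{
	struct demo_lun *lun;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &demo_lun_hlist, link) {
		if (lun->unpacked_lun == unpacked_lun) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/*
 * Writer side: insertion and removal serialize on the mutex; the
 * object is freed via kfree_rcu() only after all current readers
 * have finished their RCU read-side critical sections.
 */
static void demo_lun_del(struct demo_lun *lun)
{
	mutex_lock(&demo_lun_mutex);
	hlist_del_rcu(&lun->link);
	mutex_unlock(&demo_lun_mutex);
	kfree_rcu(lun, rcu_head);
}

Note how the converted core_tpg_add_node_to_devs() below walks the hlist while holding tpg_lun_mutex rather than rcu_read_lock(), and uses rcu_dereference_check() with lockdep_is_held() to document that either form of protection suffices.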
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 47f064415bf6..babde4ad841f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,14 +32,14 @@
 #include <linux/export.h>
 #include <net/sock.h>
 #include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
 
 #include "target_core_internal.h"
+#include "target_core_alua.h"
 #include "target_core_pr.h"
 
 extern struct se_device *g_lun0_dev;
@@ -47,45 +47,9 @@ extern struct se_device *g_lun0_dev;
 static DEFINE_SPINLOCK(tpg_lock);
 static LIST_HEAD(tpg_list);
 
-/* core_clear_initiator_node_from_tpg():
- *
- *
- */
-static void core_clear_initiator_node_from_tpg(
-	struct se_node_acl *nacl,
-	struct se_portal_group *tpg)
-{
-	int i;
-	struct se_dev_entry *deve;
-	struct se_lun *lun;
-
-	spin_lock_irq(&nacl->device_list_lock);
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = nacl->device_list[i];
-
-		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-			continue;
-
-		if (!deve->se_lun) {
-			pr_err("%s device entries device pointer is"
-				" NULL, but Initiator has access.\n",
-				tpg->se_tpg_tfo->get_fabric_name());
-			continue;
-		}
-
-		lun = deve->se_lun;
-		spin_unlock_irq(&nacl->device_list_lock);
-		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
-			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
-		spin_lock_irq(&nacl->device_list_lock);
-	}
-	spin_unlock_irq(&nacl->device_list_lock);
-}
-
 /* __core_tpg_get_initiator_node_acl():
  *
- * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
+ * mutex_lock(&tpg->acl_node_mutex); must be held when calling
  */
 struct se_node_acl *__core_tpg_get_initiator_node_acl(
 	struct se_portal_group *tpg,
@@ -111,9 +75,9 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 {
 	struct se_node_acl *acl;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	return acl;
 }
@@ -125,22 +89,20 @@ EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
  */
 void core_tpg_add_node_to_devs(
 	struct se_node_acl *acl,
-	struct se_portal_group *tpg)
+	struct se_portal_group *tpg,
+	struct se_lun *lun_orig)
 {
-	int i = 0;
 	u32 lun_access = 0;
 	struct se_lun *lun;
 	struct se_device *dev;
 
-	spin_lock(&tpg->tpg_lun_lock);
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		lun = tpg->tpg_lun_list[i];
-		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+	mutex_lock(&tpg->tpg_lun_mutex);
+	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
+		if (lun_orig && lun != lun_orig)
 			continue;
 
-		spin_unlock(&tpg->tpg_lun_lock);
-
-		dev = lun->lun_se_dev;
+		dev = rcu_dereference_check(lun->lun_se_dev,
+					    lockdep_is_held(&tpg->tpg_lun_mutex));
 		/*
 		 * By default in LIO-Target $FABRIC_MOD,
 		 * demo_mode_write_protect is ON, or READ_ONLY;
@@ -158,7 +120,7 @@ void core_tpg_add_node_to_devs(
 			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
 		}
 
-		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
 			" access for LUN in Demo Mode\n",
 			tpg->se_tpg_tfo->get_fabric_name(),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -166,7 +128,7 @@ void core_tpg_add_node_to_devs(
 			"READ-WRITE" : "READ-ONLY");
 
 		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
-				lun_access, acl, tpg);
+						 lun_access, acl, tpg);
 		/*
 		 * Check to see if there are any existing persistent reservation
 		 * APTPL pre-registrations that need to be enabled for this dynamic
@@ -174,9 +136,8 @@ void core_tpg_add_node_to_devs(
 		 */
 		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
 						    lun->unpacked_lun);
-		spin_lock(&tpg->tpg_lun_lock);
 	}
-	spin_unlock(&tpg->tpg_lun_lock);
+	mutex_unlock(&tpg->tpg_lun_mutex);
 }
 
 /* core_set_queue_depth_for_node():
@@ -197,67 +158,63 @@ static int core_set_queue_depth_for_node(
 	return 0;
 }
 
-void array_free(void *array, int n)
+static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
+		const unsigned char *initiatorname)
 {
-	void **a = array;
-	int i;
+	struct se_node_acl *acl;
 
-	for (i = 0; i < n; i++)
-		kfree(a[i]);
-	kfree(a);
-}
+	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
+			GFP_KERNEL);
+	if (!acl)
+		return NULL;
 
-static void *array_zalloc(int n, size_t size, gfp_t flags)
-{
-	void **a;
-	int i;
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
+	kref_init(&acl->acl_kref);
+	init_completion(&acl->acl_free_comp);
+	spin_lock_init(&acl->nacl_sess_lock);
+	mutex_init(&acl->lun_entry_mutex);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	if (tpg->se_tpg_tfo->tpg_get_default_depth)
+		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+	else
+		acl->queue_depth = 1;
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 
-	a = kzalloc(n * sizeof(void*), flags);
-	if (!a)
-		return NULL;
-	for (i = 0; i < n; i++) {
-		a[i] = kzalloc(size, flags);
-		if (!a[i]) {
-			array_free(a, n);
-			return NULL;
-		}
-	}
-	return a;
+	tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0)
+		goto out_free_acl;
+
+	return acl;
+
+out_free_acl:
+	kfree(acl);
+	return NULL;
 }
 
-/* core_create_device_list_for_node():
- *
- *
- */
-static int core_create_device_list_for_node(struct se_node_acl *nacl)
+static void target_add_node_acl(struct se_node_acl *acl)
 {
-	struct se_dev_entry *deve;
-	int i;
-
-	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
-			sizeof(struct se_dev_entry), GFP_KERNEL);
-	if (!nacl->device_list) {
-		pr_err("Unable to allocate memory for"
-			" struct se_node_acl->device_list\n");
-		return -ENOMEM;
-	}
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = nacl->device_list[i];
-
-		atomic_set(&deve->ua_count, 0);
-		atomic_set(&deve->pr_ref_count, 0);
-		spin_lock_init(&deve->ua_lock);
-		INIT_LIST_HEAD(&deve->alua_port_list);
-		INIT_LIST_HEAD(&deve->ua_list);
-	}
+	struct se_portal_group *tpg = acl->se_tpg;
 
-	return 0;
+	mutex_lock(&tpg->acl_node_mutex);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	mutex_unlock(&tpg->acl_node_mutex);
+
+	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
+		acl->dynamic_node_acl ? "DYNAMIC" : "",
+		acl->queue_depth,
+		tpg->se_tpg_tfo->get_fabric_name(),
+		acl->initiatorname);
 }
 
-/* core_tpg_check_initiator_node_acl()
- *
- *
- */
 struct se_node_acl *core_tpg_check_initiator_node_acl(
 	struct se_portal_group *tpg,
 	unsigned char *initiatorname)
@@ -271,35 +228,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
 		return NULL;
 
-	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+	acl = target_alloc_node_acl(tpg, initiatorname);
 	if (!acl)
 		return NULL;
-
-	INIT_LIST_HEAD(&acl->acl_list);
-	INIT_LIST_HEAD(&acl->acl_sess_list);
-	kref_init(&acl->acl_kref);
-	init_completion(&acl->acl_free_comp);
-	spin_lock_init(&acl->device_list_lock);
-	spin_lock_init(&acl->nacl_sess_lock);
-	atomic_set(&acl->acl_pr_ref_count, 0);
-	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
-	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
-	acl->se_tpg = tpg;
-	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 	acl->dynamic_node_acl = 1;
 
-	tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
-	if (core_create_device_list_for_node(acl) < 0) {
-		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
-		return NULL;
-	}
-
-	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
-		core_free_device_list_for_node(acl, tpg);
-		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
-		return NULL;
-	}
 	/*
 	 * Here we only create demo-mode MappedLUNs from the active
 	 * TPG LUNs if the fabric is not explicitly asking for
@@ -307,18 +240,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	 */
 	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
 	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
-		core_tpg_add_node_to_devs(acl, tpg);
-
-	spin_lock_irq(&tpg->acl_node_lock);
-	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
-	tpg->num_node_acls++;
-	spin_unlock_irq(&tpg->acl_node_lock);
-
-	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
-		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
-		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+		core_tpg_add_node_to_devs(acl, tpg, NULL);
 
+	target_add_node_acl(acl);
 	return acl;
 }
 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
@@ -329,40 +253,13 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
 		cpu_relax();
 }
 
-void core_tpg_clear_object_luns(struct se_portal_group *tpg)
-{
-	int i;
-	struct se_lun *lun;
-
-	spin_lock(&tpg->tpg_lun_lock);
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		lun = tpg->tpg_lun_list[i];
-
-		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
-		    (lun->lun_se_dev == NULL))
-			continue;
-
-		spin_unlock(&tpg->tpg_lun_lock);
-		core_dev_del_lun(tpg, lun);
-		spin_lock(&tpg->tpg_lun_lock);
-	}
-	spin_unlock(&tpg->tpg_lun_lock);
-}
-EXPORT_SYMBOL(core_tpg_clear_object_luns);
-
-/* core_tpg_add_initiator_node_acl():
- *
- *
- */
 struct se_node_acl *core_tpg_add_initiator_node_acl(
 	struct se_portal_group *tpg,
-	struct se_node_acl *se_nacl,
-	const char *initiatorname,
-	u32 queue_depth)
+	const char *initiatorname)
 {
-	struct se_node_acl *acl = NULL;
+	struct se_node_acl *acl;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (acl) {
 		if (acl->dynamic_node_acl) {
@@ -370,99 +267,42 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
 				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
-			spin_unlock_irq(&tpg->acl_node_lock);
-			/*
-			 * Release the locally allocated struct se_node_acl
-			 * because * core_tpg_add_initiator_node_acl() returned
-			 * a pointer to an existing demo mode node ACL.
-			 */
-			if (se_nacl)
-				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
-					se_nacl);
-			goto done;
+			mutex_unlock(&tpg->acl_node_mutex);
+			return acl;
 		}
 
 		pr_err("ACL entry for %s Initiator"
 			" Node %s already exists for TPG %u, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock_irq(&tpg->acl_node_lock);
+		mutex_unlock(&tpg->acl_node_mutex);
 		return ERR_PTR(-EEXIST);
 	}
-	spin_unlock_irq(&tpg->acl_node_lock);
-
-	if (!se_nacl) {
-		pr_err("struct se_node_acl pointer is NULL\n");
-		return ERR_PTR(-EINVAL);
-	}
-	/*
-	 * For v4.x logic the se_node_acl_s is hanging off a fabric
-	 * dependent structure allocated via
-	 * struct target_core_fabric_ops->fabric_make_nodeacl()
-	 */
-	acl = se_nacl;
+	mutex_unlock(&tpg->acl_node_mutex);
 
-	INIT_LIST_HEAD(&acl->acl_list);
-	INIT_LIST_HEAD(&acl->acl_sess_list);
-	kref_init(&acl->acl_kref);
-	init_completion(&acl->acl_free_comp);
-	spin_lock_init(&acl->device_list_lock);
-	spin_lock_init(&acl->nacl_sess_lock);
-	atomic_set(&acl->acl_pr_ref_count, 0);
-	acl->queue_depth = queue_depth;
-	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
-	acl->se_tpg = tpg;
-	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-
-	tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
-	if (core_create_device_list_for_node(acl) < 0) {
-		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+	acl = target_alloc_node_acl(tpg, initiatorname);
+	if (!acl)
 		return ERR_PTR(-ENOMEM);
-	}
-
-	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
-		core_free_device_list_for_node(acl, tpg);
-		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock_irq(&tpg->acl_node_lock);
-	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
-	tpg->num_node_acls++;
-	spin_unlock_irq(&tpg->acl_node_lock);
-
-done:
-	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
-		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
-		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
 
+	target_add_node_acl(acl);
 	return acl;
 }
-EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
 
-/* core_tpg_del_initiator_node_acl():
- *
- *
- */
-int core_tpg_del_initiator_node_acl(
-	struct se_portal_group *tpg,
-	struct se_node_acl *acl,
-	int force)
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 {
+	struct se_portal_group *tpg = acl->se_tpg;
 	LIST_HEAD(sess_list);
 	struct se_session *sess, *sess_tmp;
 	unsigned long flags;
 	int rc;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 	}
 	list_del(&acl->acl_list);
 	tpg->num_node_acls--;
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
 	acl->acl_stop = 1;
@@ -494,7 +334,6 @@ int core_tpg_del_initiator_node_acl(
 	wait_for_completion(&acl->acl_free_comp);
 
 	core_tpg_wait_for_nacl_pr_ref(acl);
-	core_clear_initiator_node_from_tpg(acl, tpg);
 	core_free_device_list_for_node(acl, tpg);
 
 	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
@@ -502,9 +341,8 @@ int core_tpg_del_initiator_node_acl(
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
 		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
 
-	return 0;
+	kfree(acl);
 }
-EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
 
 /* core_tpg_set_initiator_node_queue_depth():
  *
@@ -521,21 +359,21 @@ int core_tpg_set_initiator_node_queue_depth(
 	unsigned long flags;
 	int dynamic_acl = 0;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (!acl) {
 		pr_err("Access Control List entry for %s Initiator"
 			" Node %s does not exists for TPG %hu, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock_irq(&tpg->acl_node_lock);
+		mutex_unlock(&tpg->acl_node_mutex);
 		return -ENODEV;
 	}
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 		dynamic_acl = 1;
 	}
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	spin_lock_irqsave(&tpg->session_lock, flags);
 	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -551,10 +389,10 @@ int core_tpg_set_initiator_node_queue_depth(
 				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
 			spin_unlock_irqrestore(&tpg->session_lock, flags);
 
-			spin_lock_irq(&tpg->acl_node_lock);
+			mutex_lock(&tpg->acl_node_mutex);
 			if (dynamic_acl)
 				acl->dynamic_node_acl = 1;
-			spin_unlock_irq(&tpg->acl_node_lock);
+			mutex_unlock(&tpg->acl_node_mutex);
 			return -EEXIST;
 		}
 		/*
@@ -589,10 +427,10 @@ int core_tpg_set_initiator_node_queue_depth(
 		if (init_sess)
 			tpg->se_tpg_tfo->close_session(init_sess);
 
-		spin_lock_irq(&tpg->acl_node_lock);
+		mutex_lock(&tpg->acl_node_mutex);
 		if (dynamic_acl)
 			acl->dynamic_node_acl = 1;
-		spin_unlock_irq(&tpg->acl_node_lock);
+		mutex_unlock(&tpg->acl_node_mutex);
 		return -EINVAL;
 	}
 	spin_unlock_irqrestore(&tpg->session_lock, flags);
@@ -608,10 +446,10 @@ int core_tpg_set_initiator_node_queue_depth(
 		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg));
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	if (dynamic_acl)
 		acl->dynamic_node_acl = 1;
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	return 0;
 }
@@ -647,78 +485,54 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
 	complete(&lun->lun_ref_comp);
 }
 
-static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
-{
-	/* Set in core_dev_setup_virtual_lun0() */
-	struct se_device *dev = g_lun0_dev;
-	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
-	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-	int ret;
-
-	lun->unpacked_lun = 0;
-	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
-	atomic_set(&lun->lun_acl_count, 0);
-	init_completion(&lun->lun_shutdown_comp);
-	INIT_LIST_HEAD(&lun->lun_acl_list);
-	spin_lock_init(&lun->lun_acl_lock);
-	spin_lock_init(&lun->lun_sep_lock);
-	init_completion(&lun->lun_ref_comp);
-
-	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 int core_tpg_register(
-	const struct target_core_fabric_ops *tfo,
 	struct se_wwn *se_wwn,
 	struct se_portal_group *se_tpg,
-	void *tpg_fabric_ptr,
-	int se_tpg_type)
+	int proto_id)
 {
-	struct se_lun *lun;
-	u32 i;
+	int ret;
+
+	if (!se_tpg)
+		return -EINVAL;
+	/*
+	 * For the typical case where core_tpg_register() is called by a
+	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
+	 * configfs context, use the original tf_ops pointer already saved
+	 * by target-core in target_fabric_make_wwn().
+	 *
+	 * Otherwise, for special cases like iscsi-target discovery TPGs
+	 * the caller is responsible for setting ->se_tpg_tfo ahead of
+	 * calling core_tpg_register().
+	 */
+	if (se_wwn)
+		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
 
-	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
-			sizeof(struct se_lun), GFP_KERNEL);
-	if (!se_tpg->tpg_lun_list) {
-		pr_err("Unable to allocate struct se_portal_group->"
-				"tpg_lun_list\n");
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		lun = se_tpg->tpg_lun_list[i];
-		lun->unpacked_lun = i;
-		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
-		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
-		atomic_set(&lun->lun_acl_count, 0);
-		init_completion(&lun->lun_shutdown_comp);
-		INIT_LIST_HEAD(&lun->lun_acl_list);
-		spin_lock_init(&lun->lun_acl_lock);
-		spin_lock_init(&lun->lun_sep_lock);
-		init_completion(&lun->lun_ref_comp);
+	if (!se_tpg->se_tpg_tfo) {
+		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
+		return -EINVAL;
 	}
 
-	se_tpg->se_tpg_type = se_tpg_type;
-	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
-	se_tpg->se_tpg_tfo = tfo;
+	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
+	se_tpg->proto_id = proto_id;
 	se_tpg->se_tpg_wwn = se_wwn;
 	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
 	INIT_LIST_HEAD(&se_tpg->acl_node_list);
 	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
 	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
-	spin_lock_init(&se_tpg->acl_node_lock);
 	spin_lock_init(&se_tpg->session_lock);
-	spin_lock_init(&se_tpg->tpg_lun_lock);
-
-	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
-		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
-			array_free(se_tpg->tpg_lun_list,
-				   TRANSPORT_MAX_LUNS_PER_TPG);
-			return -ENOMEM;
+	mutex_init(&se_tpg->tpg_lun_mutex);
+	mutex_init(&se_tpg->acl_node_mutex);
+
+	if (se_tpg->proto_id >= 0) {
+		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
+		if (IS_ERR(se_tpg->tpg_virt_lun0))
+			return PTR_ERR(se_tpg->tpg_virt_lun0);
+
+		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
+				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+		if (ret < 0) {
+			kfree(se_tpg->tpg_virt_lun0);
+			return ret;
 		}
 	}
 
@@ -726,11 +540,11 @@ int core_tpg_register(
 	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
 	spin_unlock_bh(&tpg_lock);
 
-	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
-		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
-		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
-		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
-		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
+		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
+		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
+		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 
 	return 0;
 }
@@ -738,14 +552,14 @@ EXPORT_SYMBOL(core_tpg_register);
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
 	struct se_node_acl *nacl, *nacl_tmp;
+	LIST_HEAD(node_list);
 
-	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
-		" for endpoint: %s Portal Tag %u\n",
-		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
-		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
-		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
-		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
+		"Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
+		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
 
 	spin_lock_bh(&tpg_lock);
 	list_del(&se_tpg->se_tpg_node);
@@ -753,61 +567,56 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
 	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
 		cpu_relax();
+
+	mutex_lock(&se_tpg->acl_node_mutex);
+	list_splice_init(&se_tpg->acl_node_list, &node_list);
+	mutex_unlock(&se_tpg->acl_node_mutex);
 	/*
 	 * Release any remaining demo-mode generated se_node_acl that have
 	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
 	 * in transport_deregister_session().
 	 */
-	spin_lock_irq(&se_tpg->acl_node_lock);
-	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
-			acl_list) {
+	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
 		list_del(&nacl->acl_list);
 		se_tpg->num_node_acls--;
-		spin_unlock_irq(&se_tpg->acl_node_lock);
 
 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
-		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
-
-		spin_lock_irq(&se_tpg->acl_node_lock);
+		kfree(nacl);
 	}
-	spin_unlock_irq(&se_tpg->acl_node_lock);
 
-	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
-		core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
+	if (se_tpg->proto_id >= 0) {
+		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
+		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+	}
 
-	se_tpg->se_tpg_fabric_ptr = NULL;
-	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
 	return 0;
 }
 EXPORT_SYMBOL(core_tpg_deregister);
 
 struct se_lun *core_tpg_alloc_lun(
 	struct se_portal_group *tpg,
-	u32 unpacked_lun)
+	u64 unpacked_lun)
 {
 	struct se_lun *lun;
 
-	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
-			"-1: %u for Target Portal Group: %u\n",
-			tpg->se_tpg_tfo->get_fabric_name(),
-			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
-			tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		return ERR_PTR(-EOVERFLOW);
-	}
-
-	spin_lock(&tpg->tpg_lun_lock);
-	lun = tpg->tpg_lun_list[unpacked_lun];
-	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
-		pr_err("TPG Logical Unit Number: %u is already active"
-			" on %s Target Portal Group: %u, ignoring request.\n",
-			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
-			tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock(&tpg->tpg_lun_lock);
-		return ERR_PTR(-EINVAL);
+	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
+	if (!lun) {
+		pr_err("Unable to allocate se_lun memory\n");
+		return ERR_PTR(-ENOMEM);
 	}
-	spin_unlock(&tpg->tpg_lun_lock);
+	lun->unpacked_lun = unpacked_lun;
+	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+	atomic_set(&lun->lun_acl_count, 0);
+	init_completion(&lun->lun_ref_comp);
+	INIT_LIST_HEAD(&lun->lun_deve_list);
+	INIT_LIST_HEAD(&lun->lun_dev_link);
+	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+	spin_lock_init(&lun->lun_deve_lock);
+	mutex_init(&lun->lun_tg_pt_md_mutex);
+	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
+	spin_lock_init(&lun->lun_tg_pt_gp_lock);
+	lun->lun_tpg = tpg;
 
 	return lun;
 }
@@ -823,34 +632,70 @@ int core_tpg_add_lun(
 	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
 			      GFP_KERNEL);
 	if (ret < 0)
-		return ret;
+		goto out;
 
-	ret = core_dev_export(dev, tpg, lun);
-	if (ret < 0) {
-		percpu_ref_exit(&lun->lun_ref);
-		return ret;
-	}
+	ret = core_alloc_rtpi(lun, dev);
+	if (ret)
+		goto out_kill_ref;
+
+	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
+
+	mutex_lock(&tpg->tpg_lun_mutex);
+
+	spin_lock(&dev->se_port_lock);
+	lun->lun_index = dev->dev_index;
+	rcu_assign_pointer(lun->lun_se_dev, dev);
+	dev->export_count++;
+	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
+	spin_unlock(&dev->se_port_lock);
 
-	spin_lock(&tpg->tpg_lun_lock);
 	lun->lun_access = lun_access;
-	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
-	spin_unlock(&tpg->tpg_lun_lock);
+	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
+	mutex_unlock(&tpg->tpg_lun_mutex);
 
 	return 0;
+
+out_kill_ref:
+	percpu_ref_exit(&lun->lun_ref);
+out:
+	return ret;
 }
 
 void core_tpg_remove_lun(
 	struct se_portal_group *tpg,
 	struct se_lun *lun)
 {
+	/*
+	 * rcu_dereference_raw protected by se_lun->lun_group symlink
+	 * reference to se_device->dev_group.
+	 */
+	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
 	core_clear_lun_from_tpg(lun, tpg);
+	/*
+	 * Wait for any active I/O references to percpu se_lun->lun_ref to
+	 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
+	 * logic when referencing a remote target port during ALL_TGT_PT=1
+	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
+	 */
 	transport_clear_lun_ref(lun);
 
-	core_dev_unexport(lun->lun_se_dev, tpg, lun);
+	mutex_lock(&tpg->tpg_lun_mutex);
+	if (lun->lun_se_dev) {
+		target_detach_tg_pt_gp(lun);
 
-	spin_lock(&tpg->tpg_lun_lock);
-	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
-	spin_unlock(&tpg->tpg_lun_lock);
+		spin_lock(&dev->se_port_lock);
+		list_del(&lun->lun_dev_link);
+		dev->export_count--;
+		rcu_assign_pointer(lun->lun_se_dev, NULL);
+		spin_unlock(&dev->se_port_lock);
+	}
+	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+		hlist_del_rcu(&lun->link);
+	mutex_unlock(&tpg->tpg_lun_mutex);
 
 	percpu_ref_exit(&lun->lun_ref);
 }
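One API consequence visible in the hunks above: core_tpg_register() no longer takes an explicit fabric-ops pointer, fabric data pointer, or TPG type. It derives the ops from the se_wwn and takes a SCSI protocol identifier instead, with a negative proto_id marking a discovery TPG that skips the virtual LUN 0 setup. A hedged sketch of a fabric-driver call site under these assumptions (my_tpg, my_register_tpg, and my_fabric_ops are hypothetical names, not from this diff):

#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric TPG container, for illustration only. */
struct my_tpg {
	struct se_portal_group se_tpg;
};

static int my_register_tpg(struct se_wwn *wwn, struct my_tpg *tpg)
{
	/*
	 * Before this change the call carried the fabric ops and a
	 * TPG type:
	 *
	 *   core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
	 *                     tpg, TRANSPORT_TPG_TYPE_NORMAL);
	 *
	 * Now the ops come from wwn->wwn_tf->tf_ops, and the final
	 * argument is a SCSI protocol identifier from
	 * <scsi/scsi_proto.h>; a negative proto_id marks a discovery
	 * TPG that gets no virtual LUN 0.
	 */
	return core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
}

Callers that previously allocated their own se_node_acl via tpg_alloc_fabric_acl()/tpg_release_fabric_acl() also drop that responsibility: target_alloc_node_acl() above sizes the allocation with max(sizeof(*acl), tfo->node_acl_size), so the fabric's private ACL data is carved out of the same allocation and freed with a plain kfree().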