diff options
68 files changed, 3163 insertions, 6312 deletions
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py index 6085e1f19c9d..949de191fcdc 100755 --- a/Documentation/target/tcm_mod_builder.py +++ b/Documentation/target/tcm_mod_builder.py | |||
@@ -50,15 +50,6 @@ def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name): | |||
50 | buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" | 50 | buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" |
51 | buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" | 51 | buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" |
52 | buf += "\n" | 52 | buf += "\n" |
53 | buf += "struct " + fabric_mod_name + "_nacl {\n" | ||
54 | buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n" | ||
55 | buf += " u64 nport_wwpn;\n" | ||
56 | buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n" | ||
57 | buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" | ||
58 | buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" | ||
59 | buf += " struct se_node_acl se_node_acl;\n" | ||
60 | buf += "};\n" | ||
61 | buf += "\n" | ||
62 | buf += "struct " + fabric_mod_name + "_tpg {\n" | 53 | buf += "struct " + fabric_mod_name + "_tpg {\n" |
63 | buf += " /* FC lport target portal group tag for TCM */\n" | 54 | buf += " /* FC lport target portal group tag for TCM */\n" |
64 | buf += " u16 lport_tpgt;\n" | 55 | buf += " u16 lport_tpgt;\n" |
@@ -69,8 +60,6 @@ def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name): | |||
69 | buf += "};\n" | 60 | buf += "};\n" |
70 | buf += "\n" | 61 | buf += "\n" |
71 | buf += "struct " + fabric_mod_name + "_lport {\n" | 62 | buf += "struct " + fabric_mod_name + "_lport {\n" |
72 | buf += " /* SCSI protocol the lport is providing */\n" | ||
73 | buf += " u8 lport_proto_id;\n" | ||
74 | buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n" | 63 | buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n" |
75 | buf += " u64 lport_wwpn;\n" | 64 | buf += " u64 lport_wwpn;\n" |
76 | buf += " /* ASCII formatted WWPN for FC Target Lport */\n" | 65 | buf += " /* ASCII formatted WWPN for FC Target Lport */\n" |
@@ -105,14 +94,6 @@ def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name): | |||
105 | buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" | 94 | buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" |
106 | buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" | 95 | buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" |
107 | buf += "\n" | 96 | buf += "\n" |
108 | buf += "struct " + fabric_mod_name + "_nacl {\n" | ||
109 | buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n" | ||
110 | buf += " u64 iport_wwpn;\n" | ||
111 | buf += " /* ASCII formatted WWPN for Sas Initiator port */\n" | ||
112 | buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" | ||
113 | buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" | ||
114 | buf += " struct se_node_acl se_node_acl;\n" | ||
115 | buf += "};\n\n" | ||
116 | buf += "struct " + fabric_mod_name + "_tpg {\n" | 97 | buf += "struct " + fabric_mod_name + "_tpg {\n" |
117 | buf += " /* SAS port target portal group tag for TCM */\n" | 98 | buf += " /* SAS port target portal group tag for TCM */\n" |
118 | buf += " u16 tport_tpgt;\n" | 99 | buf += " u16 tport_tpgt;\n" |
@@ -122,8 +103,6 @@ def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name): | |||
122 | buf += " struct se_portal_group se_tpg;\n" | 103 | buf += " struct se_portal_group se_tpg;\n" |
123 | buf += "};\n\n" | 104 | buf += "};\n\n" |
124 | buf += "struct " + fabric_mod_name + "_tport {\n" | 105 | buf += "struct " + fabric_mod_name + "_tport {\n" |
125 | buf += " /* SCSI protocol the tport is providing */\n" | ||
126 | buf += " u8 tport_proto_id;\n" | ||
127 | buf += " /* Binary World Wide unique Port Name for SAS Target port */\n" | 106 | buf += " /* Binary World Wide unique Port Name for SAS Target port */\n" |
128 | buf += " u64 tport_wwpn;\n" | 107 | buf += " u64 tport_wwpn;\n" |
129 | buf += " /* ASCII formatted WWPN for SAS Target port */\n" | 108 | buf += " /* ASCII formatted WWPN for SAS Target port */\n" |
@@ -158,12 +137,6 @@ def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name): | |||
158 | buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" | 137 | buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" |
159 | buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" | 138 | buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" |
160 | buf += "\n" | 139 | buf += "\n" |
161 | buf += "struct " + fabric_mod_name + "_nacl {\n" | ||
162 | buf += " /* ASCII formatted InitiatorName */\n" | ||
163 | buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" | ||
164 | buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" | ||
165 | buf += " struct se_node_acl se_node_acl;\n" | ||
166 | buf += "};\n\n" | ||
167 | buf += "struct " + fabric_mod_name + "_tpg {\n" | 140 | buf += "struct " + fabric_mod_name + "_tpg {\n" |
168 | buf += " /* iSCSI target portal group tag for TCM */\n" | 141 | buf += " /* iSCSI target portal group tag for TCM */\n" |
169 | buf += " u16 tport_tpgt;\n" | 142 | buf += " u16 tport_tpgt;\n" |
@@ -173,8 +146,6 @@ def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name): | |||
173 | buf += " struct se_portal_group se_tpg;\n" | 146 | buf += " struct se_portal_group se_tpg;\n" |
174 | buf += "};\n\n" | 147 | buf += "};\n\n" |
175 | buf += "struct " + fabric_mod_name + "_tport {\n" | 148 | buf += "struct " + fabric_mod_name + "_tport {\n" |
176 | buf += " /* SCSI protocol the tport is providing */\n" | ||
177 | buf += " u8 tport_proto_id;\n" | ||
178 | buf += " /* ASCII formatted TargetName for IQN */\n" | 149 | buf += " /* ASCII formatted TargetName for IQN */\n" |
179 | buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" | 150 | buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" |
180 | buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" | 151 | buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" |
@@ -232,61 +203,12 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
232 | buf += "#include <target/target_core_base.h>\n" | 203 | buf += "#include <target/target_core_base.h>\n" |
233 | buf += "#include <target/target_core_fabric.h>\n" | 204 | buf += "#include <target/target_core_fabric.h>\n" |
234 | buf += "#include <target/target_core_fabric_configfs.h>\n" | 205 | buf += "#include <target/target_core_fabric_configfs.h>\n" |
235 | buf += "#include <target/target_core_configfs.h>\n" | ||
236 | buf += "#include <target/configfs_macros.h>\n\n" | 206 | buf += "#include <target/configfs_macros.h>\n\n" |
237 | buf += "#include \"" + fabric_mod_name + "_base.h\"\n" | 207 | buf += "#include \"" + fabric_mod_name + "_base.h\"\n" |
238 | buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" | 208 | buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" |
239 | 209 | ||
240 | buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n" | 210 | buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n" |
241 | 211 | ||
242 | buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" | ||
243 | buf += " struct se_portal_group *se_tpg,\n" | ||
244 | buf += " struct config_group *group,\n" | ||
245 | buf += " const char *name)\n" | ||
246 | buf += "{\n" | ||
247 | buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n" | ||
248 | buf += " struct " + fabric_mod_name + "_nacl *nacl;\n" | ||
249 | |||
250 | if proto_ident == "FC" or proto_ident == "SAS": | ||
251 | buf += " u64 wwpn = 0;\n" | ||
252 | |||
253 | buf += " u32 nexus_depth;\n\n" | ||
254 | buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" | ||
255 | buf += " return ERR_PTR(-EINVAL); */\n" | ||
256 | buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n" | ||
257 | buf += " if (!se_nacl_new)\n" | ||
258 | buf += " return ERR_PTR(-ENOMEM);\n" | ||
259 | buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n" | ||
260 | buf += " nexus_depth = 1;\n" | ||
261 | buf += " /*\n" | ||
262 | buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n" | ||
263 | buf += " * when converting a NodeACL from demo mode -> explict\n" | ||
264 | buf += " */\n" | ||
265 | buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n" | ||
266 | buf += " name, nexus_depth);\n" | ||
267 | buf += " if (IS_ERR(se_nacl)) {\n" | ||
268 | buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n" | ||
269 | buf += " return se_nacl;\n" | ||
270 | buf += " }\n" | ||
271 | buf += " /*\n" | ||
272 | buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n" | ||
273 | buf += " */\n" | ||
274 | buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n" | ||
275 | |||
276 | if proto_ident == "FC" or proto_ident == "SAS": | ||
277 | buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n" | ||
278 | |||
279 | buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" | ||
280 | buf += " return se_nacl;\n" | ||
281 | buf += "}\n\n" | ||
282 | buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n" | ||
283 | buf += "{\n" | ||
284 | buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n" | ||
285 | buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" | ||
286 | buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n" | ||
287 | buf += " kfree(nacl);\n" | ||
288 | buf += "}\n\n" | ||
289 | |||
290 | buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n" | 212 | buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n" |
291 | buf += " struct se_wwn *wwn,\n" | 213 | buf += " struct se_wwn *wwn,\n" |
292 | buf += " struct config_group *group,\n" | 214 | buf += " struct config_group *group,\n" |
@@ -309,8 +231,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
309 | buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" | 231 | buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" |
310 | buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" | 232 | buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" |
311 | buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" | 233 | buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" |
312 | buf += " &tpg->se_tpg, tpg,\n" | 234 | buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n" |
313 | buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" | ||
314 | buf += " if (ret < 0) {\n" | 235 | buf += " if (ret < 0) {\n" |
315 | buf += " kfree(tpg);\n" | 236 | buf += " kfree(tpg);\n" |
316 | buf += " return NULL;\n" | 237 | buf += " return NULL;\n" |
@@ -372,21 +293,13 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
372 | buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" | 293 | buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" |
373 | buf += " .module = THIS_MODULE,\n" | 294 | buf += " .module = THIS_MODULE,\n" |
374 | buf += " .name = " + fabric_mod_name + ",\n" | 295 | buf += " .name = " + fabric_mod_name + ",\n" |
375 | buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" | ||
376 | buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" | 296 | buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" |
377 | buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" | ||
378 | buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" | 297 | buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" |
379 | buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" | 298 | buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" |
380 | buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n" | ||
381 | buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n" | ||
382 | buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n" | ||
383 | buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n" | ||
384 | buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n" | 299 | buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n" |
385 | buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n" | 300 | buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n" |
386 | buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n" | 301 | buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n" |
387 | buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" | 302 | buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" |
388 | buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n" | ||
389 | buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n" | ||
390 | buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" | 303 | buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" |
391 | buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" | 304 | buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" |
392 | buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" | 305 | buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" |
@@ -396,7 +309,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
396 | buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" | 309 | buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" |
397 | buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" | 310 | buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" |
398 | buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" | 311 | buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" |
399 | buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n" | ||
400 | buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" | 312 | buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" |
401 | buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" | 313 | buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" |
402 | buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" | 314 | buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" |
@@ -409,12 +321,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
409 | buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n" | 321 | buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n" |
410 | buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" | 322 | buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" |
411 | buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" | 323 | buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" |
412 | buf += " .fabric_post_link = NULL,\n" | ||
413 | buf += " .fabric_pre_unlink = NULL,\n" | ||
414 | buf += " .fabric_make_np = NULL,\n" | ||
415 | buf += " .fabric_drop_np = NULL,\n" | ||
416 | buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" | ||
417 | buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" | ||
418 | buf += "\n" | 324 | buf += "\n" |
419 | buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" | 325 | buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" |
420 | buf += "};\n\n" | 326 | buf += "};\n\n" |
@@ -507,7 +413,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
507 | buf += "#include <scsi/scsi_proto.h>\n" | 413 | buf += "#include <scsi/scsi_proto.h>\n" |
508 | buf += "#include <target/target_core_base.h>\n" | 414 | buf += "#include <target/target_core_base.h>\n" |
509 | buf += "#include <target/target_core_fabric.h>\n" | 415 | buf += "#include <target/target_core_fabric.h>\n" |
510 | buf += "#include <target/target_core_configfs.h>\n\n" | ||
511 | buf += "#include \"" + fabric_mod_name + "_base.h\"\n" | 416 | buf += "#include \"" + fabric_mod_name + "_base.h\"\n" |
512 | buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" | 417 | buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" |
513 | 418 | ||
@@ -539,35 +444,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
539 | bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" | 444 | bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" |
540 | continue | 445 | continue |
541 | 446 | ||
542 | if re.search('get_fabric_proto_ident', fo): | ||
543 | buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n" | ||
544 | buf += "{\n" | ||
545 | buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" | ||
546 | buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" | ||
547 | buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" | ||
548 | buf += " u8 proto_id;\n\n" | ||
549 | buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" | ||
550 | if proto_ident == "FC": | ||
551 | buf += " case SCSI_PROTOCOL_FCP:\n" | ||
552 | buf += " default:\n" | ||
553 | buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n" | ||
554 | buf += " break;\n" | ||
555 | elif proto_ident == "SAS": | ||
556 | buf += " case SCSI_PROTOCOL_SAS:\n" | ||
557 | buf += " default:\n" | ||
558 | buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n" | ||
559 | buf += " break;\n" | ||
560 | elif proto_ident == "iSCSI": | ||
561 | buf += " case SCSI_PROTOCOL_ISCSI:\n" | ||
562 | buf += " default:\n" | ||
563 | buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n" | ||
564 | buf += " break;\n" | ||
565 | |||
566 | buf += " }\n\n" | ||
567 | buf += " return proto_id;\n" | ||
568 | buf += "}\n\n" | ||
569 | bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n" | ||
570 | |||
571 | if re.search('get_wwn', fo): | 447 | if re.search('get_wwn', fo): |
572 | buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n" | 448 | buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n" |
573 | buf += "{\n" | 449 | buf += "{\n" |
@@ -587,150 +463,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
587 | buf += "}\n\n" | 463 | buf += "}\n\n" |
588 | bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n" | 464 | bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n" |
589 | 465 | ||
590 | if re.search('get_default_depth', fo): | ||
591 | buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n" | ||
592 | buf += "{\n" | ||
593 | buf += " return 1;\n" | ||
594 | buf += "}\n\n" | ||
595 | bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n" | ||
596 | |||
597 | if re.search('get_pr_transport_id\)\(', fo): | ||
598 | buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n" | ||
599 | buf += " struct se_portal_group *se_tpg,\n" | ||
600 | buf += " struct se_node_acl *se_nacl,\n" | ||
601 | buf += " struct t10_pr_registration *pr_reg,\n" | ||
602 | buf += " int *format_code,\n" | ||
603 | buf += " unsigned char *buf)\n" | ||
604 | buf += "{\n" | ||
605 | buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" | ||
606 | buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" | ||
607 | buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" | ||
608 | buf += " int ret = 0;\n\n" | ||
609 | buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" | ||
610 | if proto_ident == "FC": | ||
611 | buf += " case SCSI_PROTOCOL_FCP:\n" | ||
612 | buf += " default:\n" | ||
613 | buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" | ||
614 | buf += " format_code, buf);\n" | ||
615 | buf += " break;\n" | ||
616 | elif proto_ident == "SAS": | ||
617 | buf += " case SCSI_PROTOCOL_SAS:\n" | ||
618 | buf += " default:\n" | ||
619 | buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" | ||
620 | buf += " format_code, buf);\n" | ||
621 | buf += " break;\n" | ||
622 | elif proto_ident == "iSCSI": | ||
623 | buf += " case SCSI_PROTOCOL_ISCSI:\n" | ||
624 | buf += " default:\n" | ||
625 | buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" | ||
626 | buf += " format_code, buf);\n" | ||
627 | buf += " break;\n" | ||
628 | |||
629 | buf += " }\n\n" | ||
630 | buf += " return ret;\n" | ||
631 | buf += "}\n\n" | ||
632 | bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n" | ||
633 | bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" | ||
634 | bufi += " int *, unsigned char *);\n" | ||
635 | |||
636 | if re.search('get_pr_transport_id_len\)\(', fo): | ||
637 | buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n" | ||
638 | buf += " struct se_portal_group *se_tpg,\n" | ||
639 | buf += " struct se_node_acl *se_nacl,\n" | ||
640 | buf += " struct t10_pr_registration *pr_reg,\n" | ||
641 | buf += " int *format_code)\n" | ||
642 | buf += "{\n" | ||
643 | buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" | ||
644 | buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" | ||
645 | buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" | ||
646 | buf += " int ret = 0;\n\n" | ||
647 | buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" | ||
648 | if proto_ident == "FC": | ||
649 | buf += " case SCSI_PROTOCOL_FCP:\n" | ||
650 | buf += " default:\n" | ||
651 | buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" | ||
652 | buf += " format_code);\n" | ||
653 | buf += " break;\n" | ||
654 | elif proto_ident == "SAS": | ||
655 | buf += " case SCSI_PROTOCOL_SAS:\n" | ||
656 | buf += " default:\n" | ||
657 | buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" | ||
658 | buf += " format_code);\n" | ||
659 | buf += " break;\n" | ||
660 | elif proto_ident == "iSCSI": | ||
661 | buf += " case SCSI_PROTOCOL_ISCSI:\n" | ||
662 | buf += " default:\n" | ||
663 | buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" | ||
664 | buf += " format_code);\n" | ||
665 | buf += " break;\n" | ||
666 | |||
667 | |||
668 | buf += " }\n\n" | ||
669 | buf += " return ret;\n" | ||
670 | buf += "}\n\n" | ||
671 | bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n" | ||
672 | bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" | ||
673 | bufi += " int *);\n" | ||
674 | |||
675 | if re.search('parse_pr_out_transport_id\)\(', fo): | ||
676 | buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n" | ||
677 | buf += " struct se_portal_group *se_tpg,\n" | ||
678 | buf += " const char *buf,\n" | ||
679 | buf += " u32 *out_tid_len,\n" | ||
680 | buf += " char **port_nexus_ptr)\n" | ||
681 | buf += "{\n" | ||
682 | buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" | ||
683 | buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" | ||
684 | buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" | ||
685 | buf += " char *tid = NULL;\n\n" | ||
686 | buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" | ||
687 | if proto_ident == "FC": | ||
688 | buf += " case SCSI_PROTOCOL_FCP:\n" | ||
689 | buf += " default:\n" | ||
690 | buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" | ||
691 | buf += " port_nexus_ptr);\n" | ||
692 | elif proto_ident == "SAS": | ||
693 | buf += " case SCSI_PROTOCOL_SAS:\n" | ||
694 | buf += " default:\n" | ||
695 | buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" | ||
696 | buf += " port_nexus_ptr);\n" | ||
697 | elif proto_ident == "iSCSI": | ||
698 | buf += " case SCSI_PROTOCOL_ISCSI:\n" | ||
699 | buf += " default:\n" | ||
700 | buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" | ||
701 | buf += " port_nexus_ptr);\n" | ||
702 | |||
703 | buf += " }\n\n" | ||
704 | buf += " return tid;\n" | ||
705 | buf += "}\n\n" | ||
706 | bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n" | ||
707 | bufi += " const char *, u32 *, char **);\n" | ||
708 | |||
709 | if re.search('alloc_fabric_acl\)\(', fo): | ||
710 | buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n" | ||
711 | buf += "{\n" | ||
712 | buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" | ||
713 | buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" | ||
714 | buf += " if (!nacl) {\n" | ||
715 | buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n" | ||
716 | buf += " return NULL;\n" | ||
717 | buf += " }\n\n" | ||
718 | buf += " return &nacl->se_node_acl;\n" | ||
719 | buf += "}\n\n" | ||
720 | bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n" | ||
721 | |||
722 | if re.search('release_fabric_acl\)\(', fo): | ||
723 | buf += "void " + fabric_mod_name + "_release_fabric_acl(\n" | ||
724 | buf += " struct se_portal_group *se_tpg,\n" | ||
725 | buf += " struct se_node_acl *se_nacl)\n" | ||
726 | buf += "{\n" | ||
727 | buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n" | ||
728 | buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" | ||
729 | buf += " kfree(nacl);\n" | ||
730 | buf += "}\n\n" | ||
731 | bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n" | ||
732 | bufi += " struct se_node_acl *);\n" | ||
733 | |||
734 | if re.search('tpg_get_inst_index\)\(', fo): | 466 | if re.search('tpg_get_inst_index\)\(', fo): |
735 | buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" | 467 | buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" |
736 | buf += "{\n" | 468 | buf += "{\n" |
@@ -787,13 +519,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
787 | buf += "}\n\n" | 519 | buf += "}\n\n" |
788 | bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" | 520 | bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" |
789 | 521 | ||
790 | if re.search('get_task_tag\)\(', fo): | ||
791 | buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n" | ||
792 | buf += "{\n" | ||
793 | buf += " return 0;\n" | ||
794 | buf += "}\n\n" | ||
795 | bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n" | ||
796 | |||
797 | if re.search('get_cmd_state\)\(', fo): | 522 | if re.search('get_cmd_state\)\(', fo): |
798 | buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" | 523 | buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" |
799 | buf += "{\n" | 524 | buf += "{\n" |
diff --git a/Documentation/target/tcm_mod_builder.txt b/Documentation/target/tcm_mod_builder.txt index 84533d8e747f..ae22f7005540 100644 --- a/Documentation/target/tcm_mod_builder.txt +++ b/Documentation/target/tcm_mod_builder.txt | |||
@@ -13,8 +13,8 @@ fabric skeleton, by simply using: | |||
13 | This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following | 13 | This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following |
14 | 14 | ||
15 | *) Generate new API callers for drivers/target/target_core_fabric_configs.c logic | 15 | *) Generate new API callers for drivers/target/target_core_fabric_configs.c logic |
16 | ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg() | 16 | ->make_tpg(), ->drop_tpg(), ->make_wwn(), ->drop_wwn(). These are created |
17 | ->make_wwn(), ->drop_wwn(). These are created into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c | 17 | into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c |
18 | *) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module | 18 | *) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module |
19 | using a skeleton struct target_core_fabric_ops API template. | 19 | using a skeleton struct target_core_fabric_ops API template. |
20 | *) Based on user defined T10 Proto_Ident for the new fabric module being built, | 20 | *) Based on user defined T10 Proto_Ident for the new fabric module being built, |
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt index 263b907517ac..bef81e42788f 100644 --- a/Documentation/target/tcmu-design.txt +++ b/Documentation/target/tcmu-design.txt | |||
@@ -152,7 +152,7 @@ overall shared memory region, not the entry. The data in/out buffers | |||
152 | are accessible via tht req.iov[] array. iov_cnt contains the number of | 152 | are accessible via tht req.iov[] array. iov_cnt contains the number of |
153 | entries in iov[] needed to describe either the Data-In or Data-Out | 153 | entries in iov[] needed to describe either the Data-In or Data-Out |
154 | buffers. For bidirectional commands, iov_cnt specifies how many iovec | 154 | buffers. For bidirectional commands, iov_cnt specifies how many iovec |
155 | entries cover the Data-Out area, and iov_bidi_count specifies how many | 155 | entries cover the Data-Out area, and iov_bidi_cnt specifies how many |
156 | iovec entries immediately after that in iov[] cover the Data-In | 156 | iovec entries immediately after that in iov[] cover the Data-In |
157 | area. Just like other fields, iov.iov_base is an offset from the start | 157 | area. Just like other fields, iov.iov_base is an offset from the start |
158 | of the region. | 158 | of the region. |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f3b7a34e10d8..771700963127 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -1356,7 +1356,7 @@ sequence_cmd: | |||
1356 | if (!rc && dump_payload == false && unsol_data) | 1356 | if (!rc && dump_payload == false && unsol_data) |
1357 | iscsit_set_unsoliticed_dataout(cmd); | 1357 | iscsit_set_unsoliticed_dataout(cmd); |
1358 | else if (dump_payload && imm_data) | 1358 | else if (dump_payload && imm_data) |
1359 | target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); | 1359 | target_put_sess_cmd(&cmd->se_cmd); |
1360 | 1360 | ||
1361 | return 0; | 1361 | return 0; |
1362 | } | 1362 | } |
@@ -1781,7 +1781,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) | |||
1781 | cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { | 1781 | cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { |
1782 | struct se_cmd *se_cmd = &cmd->se_cmd; | 1782 | struct se_cmd *se_cmd = &cmd->se_cmd; |
1783 | 1783 | ||
1784 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 1784 | target_put_sess_cmd(se_cmd); |
1785 | } | 1785 | } |
1786 | } | 1786 | } |
1787 | 1787 | ||
@@ -1954,7 +1954,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1954 | spin_unlock_bh(&cmd->istate_lock); | 1954 | spin_unlock_bh(&cmd->istate_lock); |
1955 | 1955 | ||
1956 | if (ret) { | 1956 | if (ret) { |
1957 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 1957 | target_put_sess_cmd(se_cmd); |
1958 | transport_send_check_condition_and_sense(se_cmd, | 1958 | transport_send_check_condition_and_sense(se_cmd, |
1959 | se_cmd->pi_err, 0); | 1959 | se_cmd->pi_err, 0); |
1960 | } else { | 1960 | } else { |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 4556cd11288e..82897ca17f32 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <target/target_core_base.h> | 47 | #include <target/target_core_base.h> |
48 | #include <target/target_core_fabric_configfs.h> | 48 | #include <target/target_core_fabric_configfs.h> |
49 | #include <target/target_core_fabric.h> | 49 | #include <target/target_core_fabric.h> |
50 | #include <target/target_core_configfs.h> | ||
51 | #include "ib_srpt.h" | 50 | #include "ib_srpt.h" |
52 | 51 | ||
53 | /* Name of this kernel module. */ | 52 | /* Name of this kernel module. */ |
@@ -94,7 +93,6 @@ MODULE_PARM_DESC(srpt_service_guid, | |||
94 | " instead of using the node_guid of the first HCA."); | 93 | " instead of using the node_guid of the first HCA."); |
95 | 94 | ||
96 | static struct ib_client srpt_client; | 95 | static struct ib_client srpt_client; |
97 | static const struct target_core_fabric_ops srpt_template; | ||
98 | static void srpt_release_channel(struct srpt_rdma_ch *ch); | 96 | static void srpt_release_channel(struct srpt_rdma_ch *ch); |
99 | static int srpt_queue_status(struct se_cmd *cmd); | 97 | static int srpt_queue_status(struct se_cmd *cmd); |
100 | 98 | ||
@@ -1336,12 +1334,12 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) | |||
1336 | 1334 | ||
1337 | BUG_ON(ch->sess == NULL); | 1335 | BUG_ON(ch->sess == NULL); |
1338 | 1336 | ||
1339 | target_put_sess_cmd(ch->sess, &ioctx->cmd); | 1337 | target_put_sess_cmd(&ioctx->cmd); |
1340 | goto out; | 1338 | goto out; |
1341 | } | 1339 | } |
1342 | 1340 | ||
1343 | pr_debug("Aborting cmd with state %d and tag %lld\n", state, | 1341 | pr_debug("Aborting cmd with state %d and tag %lld\n", state, |
1344 | ioctx->tag); | 1342 | ioctx->cmd.tag); |
1345 | 1343 | ||
1346 | switch (state) { | 1344 | switch (state) { |
1347 | case SRPT_STATE_NEW: | 1345 | case SRPT_STATE_NEW: |
@@ -1367,11 +1365,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) | |||
1367 | * not been received in time. | 1365 | * not been received in time. |
1368 | */ | 1366 | */ |
1369 | srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); | 1367 | srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); |
1370 | target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); | 1368 | target_put_sess_cmd(&ioctx->cmd); |
1371 | break; | 1369 | break; |
1372 | case SRPT_STATE_MGMT_RSP_SENT: | 1370 | case SRPT_STATE_MGMT_RSP_SENT: |
1373 | srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); | 1371 | srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); |
1374 | target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); | 1372 | target_put_sess_cmd(&ioctx->cmd); |
1375 | break; | 1373 | break; |
1376 | default: | 1374 | default: |
1377 | WARN(1, "Unexpected command state (%d)", state); | 1375 | WARN(1, "Unexpected command state (%d)", state); |
@@ -1389,7 +1387,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) | |||
1389 | { | 1387 | { |
1390 | struct srpt_send_ioctx *ioctx; | 1388 | struct srpt_send_ioctx *ioctx; |
1391 | enum srpt_command_state state; | 1389 | enum srpt_command_state state; |
1392 | struct se_cmd *cmd; | ||
1393 | u32 index; | 1390 | u32 index; |
1394 | 1391 | ||
1395 | atomic_inc(&ch->sq_wr_avail); | 1392 | atomic_inc(&ch->sq_wr_avail); |
@@ -1397,7 +1394,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) | |||
1397 | index = idx_from_wr_id(wr_id); | 1394 | index = idx_from_wr_id(wr_id); |
1398 | ioctx = ch->ioctx_ring[index]; | 1395 | ioctx = ch->ioctx_ring[index]; |
1399 | state = srpt_get_cmd_state(ioctx); | 1396 | state = srpt_get_cmd_state(ioctx); |
1400 | cmd = &ioctx->cmd; | ||
1401 | 1397 | ||
1402 | WARN_ON(state != SRPT_STATE_CMD_RSP_SENT | 1398 | WARN_ON(state != SRPT_STATE_CMD_RSP_SENT |
1403 | && state != SRPT_STATE_MGMT_RSP_SENT | 1399 | && state != SRPT_STATE_MGMT_RSP_SENT |
@@ -1474,10 +1470,8 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, | |||
1474 | struct srpt_send_ioctx *ioctx, | 1470 | struct srpt_send_ioctx *ioctx, |
1475 | enum srpt_opcode opcode) | 1471 | enum srpt_opcode opcode) |
1476 | { | 1472 | { |
1477 | struct se_cmd *cmd; | ||
1478 | enum srpt_command_state state; | 1473 | enum srpt_command_state state; |
1479 | 1474 | ||
1480 | cmd = &ioctx->cmd; | ||
1481 | state = srpt_get_cmd_state(ioctx); | 1475 | state = srpt_get_cmd_state(ioctx); |
1482 | switch (opcode) { | 1476 | switch (opcode) { |
1483 | case SRPT_RDMA_READ_LAST: | 1477 | case SRPT_RDMA_READ_LAST: |
@@ -1681,7 +1675,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd) | |||
1681 | struct srpt_send_ioctx *ioctx = container_of(cmd, | 1675 | struct srpt_send_ioctx *ioctx = container_of(cmd, |
1682 | struct srpt_send_ioctx, cmd); | 1676 | struct srpt_send_ioctx, cmd); |
1683 | 1677 | ||
1684 | return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); | 1678 | return target_put_sess_cmd(&ioctx->cmd); |
1685 | } | 1679 | } |
1686 | 1680 | ||
1687 | /** | 1681 | /** |
@@ -1703,7 +1697,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch, | |||
1703 | 1697 | ||
1704 | srp_cmd = recv_ioctx->ioctx.buf; | 1698 | srp_cmd = recv_ioctx->ioctx.buf; |
1705 | cmd = &send_ioctx->cmd; | 1699 | cmd = &send_ioctx->cmd; |
1706 | send_ioctx->tag = srp_cmd->tag; | 1700 | cmd->tag = srp_cmd->tag; |
1707 | 1701 | ||
1708 | switch (srp_cmd->task_attr) { | 1702 | switch (srp_cmd->task_attr) { |
1709 | case SRP_CMD_SIMPLE_Q: | 1703 | case SRP_CMD_SIMPLE_Q: |
@@ -1774,7 +1768,7 @@ static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag) | |||
1774 | for (i = 0; i < ch->rq_size; ++i) { | 1768 | for (i = 0; i < ch->rq_size; ++i) { |
1775 | target = ch->ioctx_ring[i]; | 1769 | target = ch->ioctx_ring[i]; |
1776 | if (target->cmd.se_lun == ioctx->cmd.se_lun && | 1770 | if (target->cmd.se_lun == ioctx->cmd.se_lun && |
1777 | target->tag == tag && | 1771 | target->cmd.tag == tag && |
1778 | srpt_get_cmd_state(target) != SRPT_STATE_DONE) { | 1772 | srpt_get_cmd_state(target) != SRPT_STATE_DONE) { |
1779 | ret = 0; | 1773 | ret = 0; |
1780 | /* now let the target core abort &target->cmd; */ | 1774 | /* now let the target core abort &target->cmd; */ |
@@ -1833,7 +1827,7 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch, | |||
1833 | srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); | 1827 | srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); |
1834 | 1828 | ||
1835 | srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT); | 1829 | srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT); |
1836 | send_ioctx->tag = srp_tsk->tag; | 1830 | send_ioctx->cmd.tag = srp_tsk->tag; |
1837 | tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func); | 1831 | tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func); |
1838 | if (tcm_tmr < 0) { | 1832 | if (tcm_tmr < 0) { |
1839 | send_ioctx->cmd.se_tmr_req->response = | 1833 | send_ioctx->cmd.se_tmr_req->response = |
@@ -2180,12 +2174,9 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) | |||
2180 | */ | 2174 | */ |
2181 | static void __srpt_close_ch(struct srpt_rdma_ch *ch) | 2175 | static void __srpt_close_ch(struct srpt_rdma_ch *ch) |
2182 | { | 2176 | { |
2183 | struct srpt_device *sdev; | ||
2184 | enum rdma_ch_state prev_state; | 2177 | enum rdma_ch_state prev_state; |
2185 | unsigned long flags; | 2178 | unsigned long flags; |
2186 | 2179 | ||
2187 | sdev = ch->sport->sdev; | ||
2188 | |||
2189 | spin_lock_irqsave(&ch->spinlock, flags); | 2180 | spin_lock_irqsave(&ch->spinlock, flags); |
2190 | prev_state = ch->state; | 2181 | prev_state = ch->state; |
2191 | switch (prev_state) { | 2182 | switch (prev_state) { |
@@ -2983,7 +2974,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd) | |||
2983 | case CH_DRAINING: | 2974 | case CH_DRAINING: |
2984 | case CH_RELEASING: | 2975 | case CH_RELEASING: |
2985 | pr_debug("cmd with tag %lld: channel disconnecting\n", | 2976 | pr_debug("cmd with tag %lld: channel disconnecting\n", |
2986 | ioctx->tag); | 2977 | ioctx->cmd.tag); |
2987 | srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); | 2978 | srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); |
2988 | ret = -EINVAL; | 2979 | ret = -EINVAL; |
2989 | goto out; | 2980 | goto out; |
@@ -3058,27 +3049,27 @@ static void srpt_queue_response(struct se_cmd *cmd) | |||
3058 | ret = srpt_xfer_data(ch, ioctx); | 3049 | ret = srpt_xfer_data(ch, ioctx); |
3059 | if (ret) { | 3050 | if (ret) { |
3060 | pr_err("xfer_data failed for tag %llu\n", | 3051 | pr_err("xfer_data failed for tag %llu\n", |
3061 | ioctx->tag); | 3052 | ioctx->cmd.tag); |
3062 | return; | 3053 | return; |
3063 | } | 3054 | } |
3064 | } | 3055 | } |
3065 | 3056 | ||
3066 | if (state != SRPT_STATE_MGMT) | 3057 | if (state != SRPT_STATE_MGMT) |
3067 | resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag, | 3058 | resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag, |
3068 | cmd->scsi_status); | 3059 | cmd->scsi_status); |
3069 | else { | 3060 | else { |
3070 | srp_tm_status | 3061 | srp_tm_status |
3071 | = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); | 3062 | = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); |
3072 | resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, | 3063 | resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, |
3073 | ioctx->tag); | 3064 | ioctx->cmd.tag); |
3074 | } | 3065 | } |
3075 | ret = srpt_post_send(ch, ioctx, resp_len); | 3066 | ret = srpt_post_send(ch, ioctx, resp_len); |
3076 | if (ret) { | 3067 | if (ret) { |
3077 | pr_err("sending cmd response failed for tag %llu\n", | 3068 | pr_err("sending cmd response failed for tag %llu\n", |
3078 | ioctx->tag); | 3069 | ioctx->cmd.tag); |
3079 | srpt_unmap_sg_to_ib_sge(ch, ioctx); | 3070 | srpt_unmap_sg_to_ib_sge(ch, ioctx); |
3080 | srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); | 3071 | srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); |
3081 | target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); | 3072 | target_put_sess_cmd(&ioctx->cmd); |
3082 | } | 3073 | } |
3083 | } | 3074 | } |
3084 | 3075 | ||
@@ -3398,11 +3389,6 @@ static char *srpt_get_fabric_name(void) | |||
3398 | return "srpt"; | 3389 | return "srpt"; |
3399 | } | 3390 | } |
3400 | 3391 | ||
3401 | static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
3402 | { | ||
3403 | return SCSI_TRANSPORTID_PROTOCOLID_SRP; | ||
3404 | } | ||
3405 | |||
3406 | static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) | 3392 | static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) |
3407 | { | 3393 | { |
3408 | struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); | 3394 | struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); |
@@ -3415,69 +3401,6 @@ static u16 srpt_get_tag(struct se_portal_group *tpg) | |||
3415 | return 1; | 3401 | return 1; |
3416 | } | 3402 | } |
3417 | 3403 | ||
3418 | static u32 srpt_get_default_depth(struct se_portal_group *se_tpg) | ||
3419 | { | ||
3420 | return 1; | ||
3421 | } | ||
3422 | |||
3423 | static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg, | ||
3424 | struct se_node_acl *se_nacl, | ||
3425 | struct t10_pr_registration *pr_reg, | ||
3426 | int *format_code, unsigned char *buf) | ||
3427 | { | ||
3428 | struct srpt_node_acl *nacl; | ||
3429 | struct spc_rdma_transport_id *tr_id; | ||
3430 | |||
3431 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | ||
3432 | tr_id = (void *)buf; | ||
3433 | tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP; | ||
3434 | memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id)); | ||
3435 | return sizeof(*tr_id); | ||
3436 | } | ||
3437 | |||
3438 | static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg, | ||
3439 | struct se_node_acl *se_nacl, | ||
3440 | struct t10_pr_registration *pr_reg, | ||
3441 | int *format_code) | ||
3442 | { | ||
3443 | *format_code = 0; | ||
3444 | return sizeof(struct spc_rdma_transport_id); | ||
3445 | } | ||
3446 | |||
3447 | static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | ||
3448 | const char *buf, u32 *out_tid_len, | ||
3449 | char **port_nexus_ptr) | ||
3450 | { | ||
3451 | struct spc_rdma_transport_id *tr_id; | ||
3452 | |||
3453 | *port_nexus_ptr = NULL; | ||
3454 | *out_tid_len = sizeof(struct spc_rdma_transport_id); | ||
3455 | tr_id = (void *)buf; | ||
3456 | return (char *)tr_id->i_port_id; | ||
3457 | } | ||
3458 | |||
3459 | static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) | ||
3460 | { | ||
3461 | struct srpt_node_acl *nacl; | ||
3462 | |||
3463 | nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); | ||
3464 | if (!nacl) { | ||
3465 | pr_err("Unable to allocate struct srpt_node_acl\n"); | ||
3466 | return NULL; | ||
3467 | } | ||
3468 | |||
3469 | return &nacl->nacl; | ||
3470 | } | ||
3471 | |||
3472 | static void srpt_release_fabric_acl(struct se_portal_group *se_tpg, | ||
3473 | struct se_node_acl *se_nacl) | ||
3474 | { | ||
3475 | struct srpt_node_acl *nacl; | ||
3476 | |||
3477 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | ||
3478 | kfree(nacl); | ||
3479 | } | ||
3480 | |||
3481 | static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) | 3404 | static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) |
3482 | { | 3405 | { |
3483 | return 1; | 3406 | return 1; |
@@ -3551,14 +3474,6 @@ static void srpt_set_default_node_attrs(struct se_node_acl *nacl) | |||
3551 | { | 3474 | { |
3552 | } | 3475 | } |
3553 | 3476 | ||
3554 | static u32 srpt_get_task_tag(struct se_cmd *se_cmd) | ||
3555 | { | ||
3556 | struct srpt_send_ioctx *ioctx; | ||
3557 | |||
3558 | ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); | ||
3559 | return ioctx->tag; | ||
3560 | } | ||
3561 | |||
3562 | /* Note: only used from inside debug printk's by the TCM core. */ | 3477 | /* Note: only used from inside debug printk's by the TCM core. */ |
3563 | static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) | 3478 | static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) |
3564 | { | 3479 | { |
@@ -3601,40 +3516,19 @@ out: | |||
3601 | * configfs callback function invoked for | 3516 | * configfs callback function invoked for |
3602 | * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id | 3517 | * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id |
3603 | */ | 3518 | */ |
3604 | static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, | 3519 | static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name) |
3605 | struct config_group *group, | ||
3606 | const char *name) | ||
3607 | { | 3520 | { |
3608 | struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); | 3521 | struct srpt_port *sport = |
3609 | struct se_node_acl *se_nacl, *se_nacl_new; | 3522 | container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1); |
3610 | struct srpt_node_acl *nacl; | 3523 | struct srpt_node_acl *nacl = |
3611 | int ret = 0; | 3524 | container_of(se_nacl, struct srpt_node_acl, nacl); |
3612 | u32 nexus_depth = 1; | ||
3613 | u8 i_port_id[16]; | 3525 | u8 i_port_id[16]; |
3614 | 3526 | ||
3615 | if (srpt_parse_i_port_id(i_port_id, name) < 0) { | 3527 | if (srpt_parse_i_port_id(i_port_id, name) < 0) { |
3616 | pr_err("invalid initiator port ID %s\n", name); | 3528 | pr_err("invalid initiator port ID %s\n", name); |
3617 | ret = -EINVAL; | 3529 | return -EINVAL; |
3618 | goto err; | ||
3619 | } | 3530 | } |
3620 | 3531 | ||
3621 | se_nacl_new = srpt_alloc_fabric_acl(tpg); | ||
3622 | if (!se_nacl_new) { | ||
3623 | ret = -ENOMEM; | ||
3624 | goto err; | ||
3625 | } | ||
3626 | /* | ||
3627 | * nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
3628 | * when converting a node ACL from demo mode to explicit | ||
3629 | */ | ||
3630 | se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name, | ||
3631 | nexus_depth); | ||
3632 | if (IS_ERR(se_nacl)) { | ||
3633 | ret = PTR_ERR(se_nacl); | ||
3634 | goto err; | ||
3635 | } | ||
3636 | /* Locate our struct srpt_node_acl and set sdev and i_port_id. */ | ||
3637 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | ||
3638 | memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); | 3532 | memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); |
3639 | nacl->sport = sport; | 3533 | nacl->sport = sport; |
3640 | 3534 | ||
@@ -3642,29 +3536,22 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, | |||
3642 | list_add_tail(&nacl->list, &sport->port_acl_list); | 3536 | list_add_tail(&nacl->list, &sport->port_acl_list); |
3643 | spin_unlock_irq(&sport->port_acl_lock); | 3537 | spin_unlock_irq(&sport->port_acl_lock); |
3644 | 3538 | ||
3645 | return se_nacl; | 3539 | return 0; |
3646 | err: | ||
3647 | return ERR_PTR(ret); | ||
3648 | } | 3540 | } |
3649 | 3541 | ||
3650 | /* | 3542 | /* |
3651 | * configfs callback function invoked for | 3543 | * configfs callback function invoked for |
3652 | * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id | 3544 | * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id |
3653 | */ | 3545 | */ |
3654 | static void srpt_drop_nodeacl(struct se_node_acl *se_nacl) | 3546 | static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl) |
3655 | { | 3547 | { |
3656 | struct srpt_node_acl *nacl; | 3548 | struct srpt_node_acl *nacl = |
3657 | struct srpt_device *sdev; | 3549 | container_of(se_nacl, struct srpt_node_acl, nacl); |
3658 | struct srpt_port *sport; | 3550 | struct srpt_port *sport = nacl->sport; |
3659 | 3551 | ||
3660 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | ||
3661 | sport = nacl->sport; | ||
3662 | sdev = sport->sdev; | ||
3663 | spin_lock_irq(&sport->port_acl_lock); | 3552 | spin_lock_irq(&sport->port_acl_lock); |
3664 | list_del(&nacl->list); | 3553 | list_del(&nacl->list); |
3665 | spin_unlock_irq(&sport->port_acl_lock); | 3554 | spin_unlock_irq(&sport->port_acl_lock); |
3666 | core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1); | ||
3667 | srpt_release_fabric_acl(NULL, se_nacl); | ||
3668 | } | 3555 | } |
3669 | 3556 | ||
3670 | static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size( | 3557 | static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size( |
@@ -3849,8 +3736,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, | |||
3849 | int res; | 3736 | int res; |
3850 | 3737 | ||
3851 | /* Initialize sport->port_wwn and sport->port_tpg_1 */ | 3738 | /* Initialize sport->port_wwn and sport->port_tpg_1 */ |
3852 | res = core_tpg_register(&srpt_template, &sport->port_wwn, | 3739 | res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP); |
3853 | &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); | ||
3854 | if (res) | 3740 | if (res) |
3855 | return ERR_PTR(res); | 3741 | return ERR_PTR(res); |
3856 | 3742 | ||
@@ -3920,20 +3806,14 @@ static struct configfs_attribute *srpt_wwn_attrs[] = { | |||
3920 | static const struct target_core_fabric_ops srpt_template = { | 3806 | static const struct target_core_fabric_ops srpt_template = { |
3921 | .module = THIS_MODULE, | 3807 | .module = THIS_MODULE, |
3922 | .name = "srpt", | 3808 | .name = "srpt", |
3809 | .node_acl_size = sizeof(struct srpt_node_acl), | ||
3923 | .get_fabric_name = srpt_get_fabric_name, | 3810 | .get_fabric_name = srpt_get_fabric_name, |
3924 | .get_fabric_proto_ident = srpt_get_fabric_proto_ident, | ||
3925 | .tpg_get_wwn = srpt_get_fabric_wwn, | 3811 | .tpg_get_wwn = srpt_get_fabric_wwn, |
3926 | .tpg_get_tag = srpt_get_tag, | 3812 | .tpg_get_tag = srpt_get_tag, |
3927 | .tpg_get_default_depth = srpt_get_default_depth, | ||
3928 | .tpg_get_pr_transport_id = srpt_get_pr_transport_id, | ||
3929 | .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len, | ||
3930 | .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id, | ||
3931 | .tpg_check_demo_mode = srpt_check_false, | 3813 | .tpg_check_demo_mode = srpt_check_false, |
3932 | .tpg_check_demo_mode_cache = srpt_check_true, | 3814 | .tpg_check_demo_mode_cache = srpt_check_true, |
3933 | .tpg_check_demo_mode_write_protect = srpt_check_true, | 3815 | .tpg_check_demo_mode_write_protect = srpt_check_true, |
3934 | .tpg_check_prod_mode_write_protect = srpt_check_false, | 3816 | .tpg_check_prod_mode_write_protect = srpt_check_false, |
3935 | .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl, | ||
3936 | .tpg_release_fabric_acl = srpt_release_fabric_acl, | ||
3937 | .tpg_get_inst_index = srpt_tpg_get_inst_index, | 3817 | .tpg_get_inst_index = srpt_tpg_get_inst_index, |
3938 | .release_cmd = srpt_release_cmd, | 3818 | .release_cmd = srpt_release_cmd, |
3939 | .check_stop_free = srpt_check_stop_free, | 3819 | .check_stop_free = srpt_check_stop_free, |
@@ -3944,7 +3824,6 @@ static const struct target_core_fabric_ops srpt_template = { | |||
3944 | .write_pending = srpt_write_pending, | 3824 | .write_pending = srpt_write_pending, |
3945 | .write_pending_status = srpt_write_pending_status, | 3825 | .write_pending_status = srpt_write_pending_status, |
3946 | .set_default_node_attributes = srpt_set_default_node_attrs, | 3826 | .set_default_node_attributes = srpt_set_default_node_attrs, |
3947 | .get_task_tag = srpt_get_task_tag, | ||
3948 | .get_cmd_state = srpt_get_tcm_cmd_state, | 3827 | .get_cmd_state = srpt_get_tcm_cmd_state, |
3949 | .queue_data_in = srpt_queue_data_in, | 3828 | .queue_data_in = srpt_queue_data_in, |
3950 | .queue_status = srpt_queue_status, | 3829 | .queue_status = srpt_queue_status, |
@@ -3958,12 +3837,8 @@ static const struct target_core_fabric_ops srpt_template = { | |||
3958 | .fabric_drop_wwn = srpt_drop_tport, | 3837 | .fabric_drop_wwn = srpt_drop_tport, |
3959 | .fabric_make_tpg = srpt_make_tpg, | 3838 | .fabric_make_tpg = srpt_make_tpg, |
3960 | .fabric_drop_tpg = srpt_drop_tpg, | 3839 | .fabric_drop_tpg = srpt_drop_tpg, |
3961 | .fabric_post_link = NULL, | 3840 | .fabric_init_nodeacl = srpt_init_nodeacl, |
3962 | .fabric_pre_unlink = NULL, | 3841 | .fabric_cleanup_nodeacl = srpt_cleanup_nodeacl, |
3963 | .fabric_make_np = NULL, | ||
3964 | .fabric_drop_np = NULL, | ||
3965 | .fabric_make_nodeacl = srpt_make_nodeacl, | ||
3966 | .fabric_drop_nodeacl = srpt_drop_nodeacl, | ||
3967 | 3842 | ||
3968 | .tfc_wwn_attrs = srpt_wwn_attrs, | 3843 | .tfc_wwn_attrs = srpt_wwn_attrs, |
3969 | .tfc_tpg_base_attrs = srpt_tpg_attrs, | 3844 | .tfc_tpg_base_attrs = srpt_tpg_attrs, |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index d85c0c205625..21f8df67522a 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h | |||
@@ -238,7 +238,6 @@ struct srpt_send_ioctx { | |||
238 | bool rdma_aborted; | 238 | bool rdma_aborted; |
239 | struct se_cmd cmd; | 239 | struct se_cmd cmd; |
240 | struct completion tx_done; | 240 | struct completion tx_done; |
241 | u64 tag; | ||
242 | int sg_cnt; | 241 | int sg_cnt; |
243 | int mapped_sg_count; | 242 | int mapped_sg_count; |
244 | u16 n_rdma_ius; | 243 | u16 n_rdma_ius; |
@@ -410,34 +409,16 @@ struct srpt_device { | |||
410 | 409 | ||
411 | /** | 410 | /** |
412 | * struct srpt_node_acl - Per-initiator ACL data (managed via configfs). | 411 | * struct srpt_node_acl - Per-initiator ACL data (managed via configfs). |
412 | * @nacl: Target core node ACL information. | ||
413 | * @i_port_id: 128-bit SRP initiator port ID. | 413 | * @i_port_id: 128-bit SRP initiator port ID. |
414 | * @sport: port information. | 414 | * @sport: port information. |
415 | * @nacl: Target core node ACL information. | ||
416 | * @list: Element of the per-HCA ACL list. | 415 | * @list: Element of the per-HCA ACL list. |
417 | */ | 416 | */ |
418 | struct srpt_node_acl { | 417 | struct srpt_node_acl { |
418 | struct se_node_acl nacl; | ||
419 | u8 i_port_id[16]; | 419 | u8 i_port_id[16]; |
420 | struct srpt_port *sport; | 420 | struct srpt_port *sport; |
421 | struct se_node_acl nacl; | ||
422 | struct list_head list; | 421 | struct list_head list; |
423 | }; | 422 | }; |
424 | 423 | ||
425 | /* | ||
426 | * SRP-related SCSI persistent reservation definitions. | ||
427 | * | ||
428 | * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction). | ||
429 | * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using | ||
430 | * SCSI over an RDMA interface). | ||
431 | */ | ||
432 | |||
433 | enum { | ||
434 | SCSI_TRANSPORTID_PROTOCOLID_SRP = 4, | ||
435 | }; | ||
436 | |||
437 | struct spc_rdma_transport_id { | ||
438 | uint8_t protocol_identifier; | ||
439 | uint8_t reserved[7]; | ||
440 | uint8_t i_port_id[16]; | ||
441 | }; | ||
442 | |||
443 | #endif /* IB_SRPT_H */ | 424 | #endif /* IB_SRPT_H */ |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 4a484d60be0d..b749026aa592 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -1191,7 +1191,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, | |||
1191 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { | 1191 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { |
1192 | struct qla_tgt_cmd *cmd = | 1192 | struct qla_tgt_cmd *cmd = |
1193 | container_of(se_cmd, struct qla_tgt_cmd, se_cmd); | 1193 | container_of(se_cmd, struct qla_tgt_cmd, se_cmd); |
1194 | if (cmd->tag == abts->exchange_addr_to_abort) { | 1194 | if (se_cmd->tag == abts->exchange_addr_to_abort) { |
1195 | lun = cmd->unpacked_lun; | 1195 | lun = cmd->unpacked_lun; |
1196 | found_lun = true; | 1196 | found_lun = true; |
1197 | break; | 1197 | break; |
@@ -1728,9 +1728,8 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, | |||
1728 | 1728 | ||
1729 | if (unlikely(cmd->aborted)) { | 1729 | if (unlikely(cmd->aborted)) { |
1730 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, | 1730 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, |
1731 | "qla_target(%d): terminating exchange " | 1731 | "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)", |
1732 | "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd, | 1732 | vha->vp_idx, cmd, se_cmd, se_cmd->tag); |
1733 | se_cmd, cmd->tag); | ||
1734 | 1733 | ||
1735 | cmd->state = QLA_TGT_STATE_ABORTED; | 1734 | cmd->state = QLA_TGT_STATE_ABORTED; |
1736 | cmd->cmd_flags |= BIT_6; | 1735 | cmd->cmd_flags |= BIT_6; |
@@ -1765,18 +1764,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, | |||
1765 | if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | 1764 | if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { |
1766 | prm->residual = se_cmd->residual_count; | 1765 | prm->residual = se_cmd->residual_count; |
1767 | ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, | 1766 | ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, |
1768 | "Residual underflow: %d (tag %d, " | 1767 | "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", |
1769 | "op %x, bufflen %d, rq_result %x)\n", prm->residual, | 1768 | prm->residual, se_cmd->tag, |
1770 | cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, | 1769 | se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, |
1771 | cmd->bufflen, prm->rq_result); | 1770 | cmd->bufflen, prm->rq_result); |
1772 | prm->rq_result |= SS_RESIDUAL_UNDER; | 1771 | prm->rq_result |= SS_RESIDUAL_UNDER; |
1773 | } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { | 1772 | } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { |
1774 | prm->residual = se_cmd->residual_count; | 1773 | prm->residual = se_cmd->residual_count; |
1775 | ql_dbg(ql_dbg_io, vha, 0x305d, | 1774 | ql_dbg(ql_dbg_io, vha, 0x305d, |
1776 | "Residual overflow: %d (tag %d, " | 1775 | "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", |
1777 | "op %x, bufflen %d, rq_result %x)\n", prm->residual, | 1776 | prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? |
1778 | cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, | 1777 | se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); |
1779 | cmd->bufflen, prm->rq_result); | ||
1780 | prm->rq_result |= SS_RESIDUAL_OVER; | 1778 | prm->rq_result |= SS_RESIDUAL_OVER; |
1781 | } | 1779 | } |
1782 | 1780 | ||
@@ -1849,7 +1847,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) | |||
1849 | == 50) { | 1847 | == 50) { |
1850 | *xmit_type &= ~QLA_TGT_XMIT_STATUS; | 1848 | *xmit_type &= ~QLA_TGT_XMIT_STATUS; |
1851 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, | 1849 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, |
1852 | "Dropping cmd %p (tag %d) status", cmd, cmd->tag); | 1850 | "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag); |
1853 | } | 1851 | } |
1854 | #endif | 1852 | #endif |
1855 | /* | 1853 | /* |
@@ -1873,7 +1871,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) | |||
1873 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, | 1871 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, |
1874 | "Cutting cmd %p (tag %d) buffer" | 1872 | "Cutting cmd %p (tag %d) buffer" |
1875 | " tail to len %d, sg_cnt %d (cmd->bufflen %d," | 1873 | " tail to len %d, sg_cnt %d (cmd->bufflen %d," |
1876 | " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave, | 1874 | " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave, |
1877 | cmd->bufflen, cmd->sg_cnt); | 1875 | cmd->bufflen, cmd->sg_cnt); |
1878 | 1876 | ||
1879 | cmd->bufflen = tot_len; | 1877 | cmd->bufflen = tot_len; |
@@ -1885,13 +1883,13 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) | |||
1885 | 1883 | ||
1886 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, | 1884 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, |
1887 | "Cutting cmd %p (tag %d) buffer head " | 1885 | "Cutting cmd %p (tag %d) buffer head " |
1888 | "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset, | 1886 | "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset, |
1889 | cmd->bufflen); | 1887 | cmd->bufflen); |
1890 | if (offset == 0) | 1888 | if (offset == 0) |
1891 | *xmit_type &= ~QLA_TGT_XMIT_DATA; | 1889 | *xmit_type &= ~QLA_TGT_XMIT_DATA; |
1892 | else if (qlt_set_data_offset(cmd, offset)) { | 1890 | else if (qlt_set_data_offset(cmd, offset)) { |
1893 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, | 1891 | ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, |
1894 | "qlt_set_data_offset() failed (tag %d)", cmd->tag); | 1892 | "qlt_set_data_offset() failed (tag %d)", se_cmd->tag); |
1895 | } | 1893 | } |
1896 | } | 1894 | } |
1897 | } | 1895 | } |
@@ -3194,7 +3192,7 @@ skip_term: | |||
3194 | return; | 3192 | return; |
3195 | } else if (cmd->state == QLA_TGT_STATE_ABORTED) { | 3193 | } else if (cmd->state == QLA_TGT_STATE_ABORTED) { |
3196 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, | 3194 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, |
3197 | "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); | 3195 | "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); |
3198 | } else { | 3196 | } else { |
3199 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, | 3197 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, |
3200 | "qla_target(%d): A command in state (%d) should " | 3198 | "qla_target(%d): A command in state (%d) should " |
@@ -3266,7 +3264,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) | |||
3266 | goto out_term; | 3264 | goto out_term; |
3267 | 3265 | ||
3268 | cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; | 3266 | cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; |
3269 | cmd->tag = atio->u.isp24.exchange_addr; | 3267 | cmd->se_cmd.tag = atio->u.isp24.exchange_addr; |
3270 | cmd->unpacked_lun = scsilun_to_int( | 3268 | cmd->unpacked_lun = scsilun_to_int( |
3271 | (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); | 3269 | (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); |
3272 | 3270 | ||
@@ -3893,9 +3891,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha, | |||
3893 | resp = 1; | 3891 | resp = 1; |
3894 | } else { | 3892 | } else { |
3895 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, | 3893 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, |
3896 | "qla_target(%d): SRR for in data for cmd " | 3894 | "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject", |
3897 | "without them (tag %d, SCSI status %d), " | 3895 | vha->vp_idx, se_cmd->tag, |
3898 | "reject", vha->vp_idx, cmd->tag, | ||
3899 | cmd->se_cmd.scsi_status); | 3896 | cmd->se_cmd.scsi_status); |
3900 | goto out_reject; | 3897 | goto out_reject; |
3901 | } | 3898 | } |
@@ -3929,10 +3926,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha, | |||
3929 | } | 3926 | } |
3930 | } else { | 3927 | } else { |
3931 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, | 3928 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, |
3932 | "qla_target(%d): SRR for out data for cmd " | 3929 | "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", |
3933 | "without them (tag %d, SCSI status %d), " | 3930 | vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); |
3934 | "reject", vha->vp_idx, cmd->tag, | ||
3935 | cmd->se_cmd.scsi_status); | ||
3936 | goto out_reject; | 3931 | goto out_reject; |
3937 | } | 3932 | } |
3938 | break; | 3933 | break; |
@@ -4053,10 +4048,9 @@ restart: | |||
4053 | cmd->sg = se_cmd->t_data_sg; | 4048 | cmd->sg = se_cmd->t_data_sg; |
4054 | 4049 | ||
4055 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, | 4050 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, |
4056 | "SRR cmd %p (se_cmd %p, tag %d, op %x), " | 4051 | "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", |
4057 | "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, | 4052 | cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ? |
4058 | se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, | 4053 | se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset); |
4059 | cmd->sg_cnt, cmd->offset); | ||
4060 | 4054 | ||
4061 | qlt_handle_srr(vha, sctio, imm); | 4055 | qlt_handle_srr(vha, sctio, imm); |
4062 | 4056 | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 332086776dfe..985d76dd706b 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -924,7 +924,6 @@ struct qla_tgt_cmd { | |||
924 | int sg_cnt; /* SG segments count */ | 924 | int sg_cnt; /* SG segments count */ |
925 | int bufflen; /* cmd buffer length */ | 925 | int bufflen; /* cmd buffer length */ |
926 | int offset; | 926 | int offset; |
927 | uint32_t tag; | ||
928 | uint32_t unpacked_lun; | 927 | uint32_t unpacked_lun; |
929 | enum dma_data_direction dma_data_direction; | 928 | enum dma_data_direction dma_data_direction; |
930 | uint32_t reset_count; | 929 | uint32_t reset_count; |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index e32d24ec7a11..d9a8c6084346 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <target/target_core_base.h> | 44 | #include <target/target_core_base.h> |
45 | #include <target/target_core_fabric.h> | 45 | #include <target/target_core_fabric.h> |
46 | #include <target/target_core_fabric_configfs.h> | 46 | #include <target/target_core_fabric_configfs.h> |
47 | #include <target/target_core_configfs.h> | ||
48 | #include <target/configfs_macros.h> | 47 | #include <target/configfs_macros.h> |
49 | 48 | ||
50 | #include "qla_def.h" | 49 | #include "qla_def.h" |
@@ -54,9 +53,6 @@ | |||
54 | static struct workqueue_struct *tcm_qla2xxx_free_wq; | 53 | static struct workqueue_struct *tcm_qla2xxx_free_wq; |
55 | static struct workqueue_struct *tcm_qla2xxx_cmd_wq; | 54 | static struct workqueue_struct *tcm_qla2xxx_cmd_wq; |
56 | 55 | ||
57 | static const struct target_core_fabric_ops tcm_qla2xxx_ops; | ||
58 | static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops; | ||
59 | |||
60 | /* | 56 | /* |
61 | * Parse WWN. | 57 | * Parse WWN. |
62 | * If strict, we require lower-case hex and colon separators to be sure | 58 | * If strict, we require lower-case hex and colon separators to be sure |
@@ -191,23 +187,6 @@ static char *tcm_qla2xxx_npiv_get_fabric_name(void) | |||
191 | return "qla2xxx_npiv"; | 187 | return "qla2xxx_npiv"; |
192 | } | 188 | } |
193 | 189 | ||
194 | static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
195 | { | ||
196 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
197 | struct tcm_qla2xxx_tpg, se_tpg); | ||
198 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
199 | u8 proto_id; | ||
200 | |||
201 | switch (lport->lport_proto_id) { | ||
202 | case SCSI_PROTOCOL_FCP: | ||
203 | default: | ||
204 | proto_id = fc_get_fabric_proto_ident(se_tpg); | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | return proto_id; | ||
209 | } | ||
210 | |||
211 | static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) | 190 | static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) |
212 | { | 191 | { |
213 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | 192 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
@@ -224,78 +203,6 @@ static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) | |||
224 | return tpg->lport_tpgt; | 203 | return tpg->lport_tpgt; |
225 | } | 204 | } |
226 | 205 | ||
227 | static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg) | ||
228 | { | ||
229 | return 1; | ||
230 | } | ||
231 | |||
232 | static u32 tcm_qla2xxx_get_pr_transport_id( | ||
233 | struct se_portal_group *se_tpg, | ||
234 | struct se_node_acl *se_nacl, | ||
235 | struct t10_pr_registration *pr_reg, | ||
236 | int *format_code, | ||
237 | unsigned char *buf) | ||
238 | { | ||
239 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
240 | struct tcm_qla2xxx_tpg, se_tpg); | ||
241 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
242 | int ret = 0; | ||
243 | |||
244 | switch (lport->lport_proto_id) { | ||
245 | case SCSI_PROTOCOL_FCP: | ||
246 | default: | ||
247 | ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
248 | format_code, buf); | ||
249 | break; | ||
250 | } | ||
251 | |||
252 | return ret; | ||
253 | } | ||
254 | |||
255 | static u32 tcm_qla2xxx_get_pr_transport_id_len( | ||
256 | struct se_portal_group *se_tpg, | ||
257 | struct se_node_acl *se_nacl, | ||
258 | struct t10_pr_registration *pr_reg, | ||
259 | int *format_code) | ||
260 | { | ||
261 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
262 | struct tcm_qla2xxx_tpg, se_tpg); | ||
263 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
264 | int ret = 0; | ||
265 | |||
266 | switch (lport->lport_proto_id) { | ||
267 | case SCSI_PROTOCOL_FCP: | ||
268 | default: | ||
269 | ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
270 | format_code); | ||
271 | break; | ||
272 | } | ||
273 | |||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | static char *tcm_qla2xxx_parse_pr_out_transport_id( | ||
278 | struct se_portal_group *se_tpg, | ||
279 | const char *buf, | ||
280 | u32 *out_tid_len, | ||
281 | char **port_nexus_ptr) | ||
282 | { | ||
283 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
284 | struct tcm_qla2xxx_tpg, se_tpg); | ||
285 | struct tcm_qla2xxx_lport *lport = tpg->lport; | ||
286 | char *tid = NULL; | ||
287 | |||
288 | switch (lport->lport_proto_id) { | ||
289 | case SCSI_PROTOCOL_FCP: | ||
290 | default: | ||
291 | tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
292 | port_nexus_ptr); | ||
293 | break; | ||
294 | } | ||
295 | |||
296 | return tid; | ||
297 | } | ||
298 | |||
299 | static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) | 206 | static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) |
300 | { | 207 | { |
301 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | 208 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
@@ -344,29 +251,6 @@ static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) | |||
344 | return tpg->tpg_attrib.fabric_prot_type; | 251 | return tpg->tpg_attrib.fabric_prot_type; |
345 | } | 252 | } |
346 | 253 | ||
347 | static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( | ||
348 | struct se_portal_group *se_tpg) | ||
349 | { | ||
350 | struct tcm_qla2xxx_nacl *nacl; | ||
351 | |||
352 | nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL); | ||
353 | if (!nacl) { | ||
354 | pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n"); | ||
355 | return NULL; | ||
356 | } | ||
357 | |||
358 | return &nacl->se_node_acl; | ||
359 | } | ||
360 | |||
361 | static void tcm_qla2xxx_release_fabric_acl( | ||
362 | struct se_portal_group *se_tpg, | ||
363 | struct se_node_acl *se_nacl) | ||
364 | { | ||
365 | struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, | ||
366 | struct tcm_qla2xxx_nacl, se_node_acl); | ||
367 | kfree(nacl); | ||
368 | } | ||
369 | |||
370 | static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) | 254 | static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) |
371 | { | 255 | { |
372 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | 256 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, |
@@ -430,7 +314,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) | |||
430 | cmd->cmd_flags |= BIT_14; | 314 | cmd->cmd_flags |= BIT_14; |
431 | } | 315 | } |
432 | 316 | ||
433 | return target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 317 | return target_put_sess_cmd(se_cmd); |
434 | } | 318 | } |
435 | 319 | ||
436 | /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying | 320 | /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying |
@@ -534,19 +418,6 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) | |||
534 | return; | 418 | return; |
535 | } | 419 | } |
536 | 420 | ||
537 | static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) | ||
538 | { | ||
539 | struct qla_tgt_cmd *cmd; | ||
540 | |||
541 | /* check for task mgmt cmd */ | ||
542 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) | ||
543 | return 0xffffffff; | ||
544 | |||
545 | cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); | ||
546 | |||
547 | return cmd->tag; | ||
548 | } | ||
549 | |||
550 | static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) | 421 | static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) |
551 | { | 422 | { |
552 | return 0; | 423 | return 0; |
@@ -827,17 +698,6 @@ static void tcm_qla2xxx_release_session(struct kref *kref) | |||
827 | qlt_unreg_sess(se_sess->fabric_sess_ptr); | 698 | qlt_unreg_sess(se_sess->fabric_sess_ptr); |
828 | } | 699 | } |
829 | 700 | ||
830 | static void tcm_qla2xxx_put_session(struct se_session *se_sess) | ||
831 | { | ||
832 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
833 | struct qla_hw_data *ha = sess->vha->hw; | ||
834 | unsigned long flags; | ||
835 | |||
836 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
837 | kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); | ||
838 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
839 | } | ||
840 | |||
841 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) | 701 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) |
842 | { | 702 | { |
843 | if (!sess) | 703 | if (!sess) |
@@ -853,53 +713,20 @@ static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) | |||
853 | target_sess_cmd_list_set_waiting(sess->se_sess); | 713 | target_sess_cmd_list_set_waiting(sess->se_sess); |
854 | } | 714 | } |
855 | 715 | ||
856 | static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | 716 | static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl, |
857 | struct se_portal_group *se_tpg, | 717 | const char *name) |
858 | struct config_group *group, | ||
859 | const char *name) | ||
860 | { | 718 | { |
861 | struct se_node_acl *se_nacl, *se_nacl_new; | 719 | struct tcm_qla2xxx_nacl *nacl = |
862 | struct tcm_qla2xxx_nacl *nacl; | 720 | container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
863 | u64 wwnn; | 721 | u64 wwnn; |
864 | u32 qla2xxx_nexus_depth; | ||
865 | 722 | ||
866 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) | 723 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) |
867 | return ERR_PTR(-EINVAL); | 724 | return -EINVAL; |
868 | |||
869 | se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg); | ||
870 | if (!se_nacl_new) | ||
871 | return ERR_PTR(-ENOMEM); | ||
872 | /* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */ | ||
873 | qla2xxx_nexus_depth = 1; | ||
874 | 725 | ||
875 | /* | ||
876 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
877 | * when converting a NodeACL from demo mode -> explict | ||
878 | */ | ||
879 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | ||
880 | name, qla2xxx_nexus_depth); | ||
881 | if (IS_ERR(se_nacl)) { | ||
882 | tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); | ||
883 | return se_nacl; | ||
884 | } | ||
885 | /* | ||
886 | * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN | ||
887 | */ | ||
888 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | ||
889 | nacl->nport_wwnn = wwnn; | 726 | nacl->nport_wwnn = wwnn; |
890 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); | 727 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); |
891 | 728 | ||
892 | return se_nacl; | 729 | return 0; |
893 | } | ||
894 | |||
895 | static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl) | ||
896 | { | ||
897 | struct se_portal_group *se_tpg = se_acl->se_tpg; | ||
898 | struct tcm_qla2xxx_nacl *nacl = container_of(se_acl, | ||
899 | struct tcm_qla2xxx_nacl, se_node_acl); | ||
900 | |||
901 | core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1); | ||
902 | kfree(nacl); | ||
903 | } | 730 | } |
904 | 731 | ||
905 | /* Start items for tcm_qla2xxx_tpg_attrib_cit */ | 732 | /* Start items for tcm_qla2xxx_tpg_attrib_cit */ |
@@ -1175,8 +1002,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( | |||
1175 | tpg->tpg_attrib.cache_dynamic_acls = 1; | 1002 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
1176 | tpg->tpg_attrib.demo_mode_login_only = 1; | 1003 | tpg->tpg_attrib.demo_mode_login_only = 1; |
1177 | 1004 | ||
1178 | ret = core_tpg_register(&tcm_qla2xxx_ops, wwn, | 1005 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); |
1179 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
1180 | if (ret < 0) { | 1006 | if (ret < 0) { |
1181 | kfree(tpg); | 1007 | kfree(tpg); |
1182 | return NULL; | 1008 | return NULL; |
@@ -1295,8 +1121,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( | |||
1295 | tpg->tpg_attrib.cache_dynamic_acls = 1; | 1121 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
1296 | tpg->tpg_attrib.demo_mode_login_only = 1; | 1122 | tpg->tpg_attrib.demo_mode_login_only = 1; |
1297 | 1123 | ||
1298 | ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn, | 1124 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); |
1299 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
1300 | if (ret < 0) { | 1125 | if (ret < 0) { |
1301 | kfree(tpg); | 1126 | kfree(tpg); |
1302 | return NULL; | 1127 | return NULL; |
@@ -1988,14 +1813,10 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { | |||
1988 | static const struct target_core_fabric_ops tcm_qla2xxx_ops = { | 1813 | static const struct target_core_fabric_ops tcm_qla2xxx_ops = { |
1989 | .module = THIS_MODULE, | 1814 | .module = THIS_MODULE, |
1990 | .name = "qla2xxx", | 1815 | .name = "qla2xxx", |
1816 | .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), | ||
1991 | .get_fabric_name = tcm_qla2xxx_get_fabric_name, | 1817 | .get_fabric_name = tcm_qla2xxx_get_fabric_name, |
1992 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, | ||
1993 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, | 1818 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, |
1994 | .tpg_get_tag = tcm_qla2xxx_get_tag, | 1819 | .tpg_get_tag = tcm_qla2xxx_get_tag, |
1995 | .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, | ||
1996 | .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, | ||
1997 | .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, | ||
1998 | .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, | ||
1999 | .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, | 1820 | .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, |
2000 | .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, | 1821 | .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, |
2001 | .tpg_check_demo_mode_write_protect = | 1822 | .tpg_check_demo_mode_write_protect = |
@@ -2004,12 +1825,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
2004 | tcm_qla2xxx_check_prod_write_protect, | 1825 | tcm_qla2xxx_check_prod_write_protect, |
2005 | .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, | 1826 | .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, |
2006 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, | 1827 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, |
2007 | .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, | ||
2008 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | ||
2009 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1828 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
2010 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1829 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
2011 | .release_cmd = tcm_qla2xxx_release_cmd, | 1830 | .release_cmd = tcm_qla2xxx_release_cmd, |
2012 | .put_session = tcm_qla2xxx_put_session, | ||
2013 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1831 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
2014 | .close_session = tcm_qla2xxx_close_session, | 1832 | .close_session = tcm_qla2xxx_close_session, |
2015 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1833 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
@@ -2017,7 +1835,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
2017 | .write_pending = tcm_qla2xxx_write_pending, | 1835 | .write_pending = tcm_qla2xxx_write_pending, |
2018 | .write_pending_status = tcm_qla2xxx_write_pending_status, | 1836 | .write_pending_status = tcm_qla2xxx_write_pending_status, |
2019 | .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, | 1837 | .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, |
2020 | .get_task_tag = tcm_qla2xxx_get_task_tag, | ||
2021 | .get_cmd_state = tcm_qla2xxx_get_cmd_state, | 1838 | .get_cmd_state = tcm_qla2xxx_get_cmd_state, |
2022 | .queue_data_in = tcm_qla2xxx_queue_data_in, | 1839 | .queue_data_in = tcm_qla2xxx_queue_data_in, |
2023 | .queue_status = tcm_qla2xxx_queue_status, | 1840 | .queue_status = tcm_qla2xxx_queue_status, |
@@ -2031,12 +1848,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
2031 | .fabric_drop_wwn = tcm_qla2xxx_drop_lport, | 1848 | .fabric_drop_wwn = tcm_qla2xxx_drop_lport, |
2032 | .fabric_make_tpg = tcm_qla2xxx_make_tpg, | 1849 | .fabric_make_tpg = tcm_qla2xxx_make_tpg, |
2033 | .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, | 1850 | .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, |
2034 | .fabric_post_link = NULL, | 1851 | .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, |
2035 | .fabric_pre_unlink = NULL, | ||
2036 | .fabric_make_np = NULL, | ||
2037 | .fabric_drop_np = NULL, | ||
2038 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, | ||
2039 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, | ||
2040 | 1852 | ||
2041 | .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, | 1853 | .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, |
2042 | .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, | 1854 | .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, |
@@ -2046,26 +1858,19 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
2046 | static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | 1858 | static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { |
2047 | .module = THIS_MODULE, | 1859 | .module = THIS_MODULE, |
2048 | .name = "qla2xxx_npiv", | 1860 | .name = "qla2xxx_npiv", |
1861 | .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), | ||
2049 | .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, | 1862 | .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, |
2050 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, | ||
2051 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, | 1863 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, |
2052 | .tpg_get_tag = tcm_qla2xxx_get_tag, | 1864 | .tpg_get_tag = tcm_qla2xxx_get_tag, |
2053 | .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, | ||
2054 | .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, | ||
2055 | .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, | ||
2056 | .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, | ||
2057 | .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, | 1865 | .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, |
2058 | .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, | 1866 | .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, |
2059 | .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, | 1867 | .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, |
2060 | .tpg_check_prod_mode_write_protect = | 1868 | .tpg_check_prod_mode_write_protect = |
2061 | tcm_qla2xxx_check_prod_write_protect, | 1869 | tcm_qla2xxx_check_prod_write_protect, |
2062 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, | 1870 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, |
2063 | .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, | ||
2064 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | ||
2065 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1871 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
2066 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1872 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
2067 | .release_cmd = tcm_qla2xxx_release_cmd, | 1873 | .release_cmd = tcm_qla2xxx_release_cmd, |
2068 | .put_session = tcm_qla2xxx_put_session, | ||
2069 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1874 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
2070 | .close_session = tcm_qla2xxx_close_session, | 1875 | .close_session = tcm_qla2xxx_close_session, |
2071 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1876 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
@@ -2073,7 +1878,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
2073 | .write_pending = tcm_qla2xxx_write_pending, | 1878 | .write_pending = tcm_qla2xxx_write_pending, |
2074 | .write_pending_status = tcm_qla2xxx_write_pending_status, | 1879 | .write_pending_status = tcm_qla2xxx_write_pending_status, |
2075 | .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, | 1880 | .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, |
2076 | .get_task_tag = tcm_qla2xxx_get_task_tag, | ||
2077 | .get_cmd_state = tcm_qla2xxx_get_cmd_state, | 1881 | .get_cmd_state = tcm_qla2xxx_get_cmd_state, |
2078 | .queue_data_in = tcm_qla2xxx_queue_data_in, | 1882 | .queue_data_in = tcm_qla2xxx_queue_data_in, |
2079 | .queue_status = tcm_qla2xxx_queue_status, | 1883 | .queue_status = tcm_qla2xxx_queue_status, |
@@ -2087,12 +1891,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
2087 | .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, | 1891 | .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, |
2088 | .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, | 1892 | .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, |
2089 | .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, | 1893 | .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, |
2090 | .fabric_post_link = NULL, | 1894 | .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, |
2091 | .fabric_pre_unlink = NULL, | ||
2092 | .fabric_make_np = NULL, | ||
2093 | .fabric_drop_np = NULL, | ||
2094 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, | ||
2095 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, | ||
2096 | 1895 | ||
2097 | .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, | 1896 | .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, |
2098 | .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, | 1897 | .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 23295115c9fc..3bbf4cb6fd97 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #include "qla_target.h" | 13 | #include "qla_target.h" |
14 | 14 | ||
15 | struct tcm_qla2xxx_nacl { | 15 | struct tcm_qla2xxx_nacl { |
16 | struct se_node_acl se_node_acl; | ||
17 | |||
16 | /* From libfc struct fc_rport->port_id */ | 18 | /* From libfc struct fc_rport->port_id */ |
17 | u32 nport_id; | 19 | u32 nport_id; |
18 | /* Binary World Wide unique Node Name for remote FC Initiator Nport */ | 20 | /* Binary World Wide unique Node Name for remote FC Initiator Nport */ |
@@ -23,8 +25,6 @@ struct tcm_qla2xxx_nacl { | |||
23 | struct qla_tgt_sess *qla_tgt_sess; | 25 | struct qla_tgt_sess *qla_tgt_sess; |
24 | /* Pointer to TCM FC nexus */ | 26 | /* Pointer to TCM FC nexus */ |
25 | struct se_session *nport_nexus; | 27 | struct se_session *nport_nexus; |
26 | /* Returned by tcm_qla2xxx_make_nodeacl() */ | ||
27 | struct se_node_acl se_node_acl; | ||
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct tcm_qla2xxx_tpg_attrib { | 30 | struct tcm_qla2xxx_tpg_attrib { |
@@ -57,8 +57,6 @@ struct tcm_qla2xxx_fc_loopid { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct tcm_qla2xxx_lport { | 59 | struct tcm_qla2xxx_lport { |
60 | /* SCSI protocol the lport is providing */ | ||
61 | u8 lport_proto_id; | ||
62 | /* Binary World Wide unique Port Name for FC Target Lport */ | 60 | /* Binary World Wide unique Port Name for FC Target Lport */ |
63 | u64 lport_wwpn; | 61 | u64 lport_wwpn; |
64 | /* Binary World Wide unique Port Name for FC NPIV Target Lport */ | 62 | /* Binary World Wide unique Port Name for FC NPIV Target Lport */ |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index a3fba366cebe..4e68b62193ed 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <scsi/scsi_tcq.h> | 29 | #include <scsi/scsi_tcq.h> |
30 | #include <target/target_core_base.h> | 30 | #include <target/target_core_base.h> |
31 | #include <target/target_core_fabric.h> | 31 | #include <target/target_core_fabric.h> |
32 | #include <target/target_core_configfs.h> | ||
33 | 32 | ||
34 | #include <target/iscsi/iscsi_target_core.h> | 33 | #include <target/iscsi/iscsi_target_core.h> |
35 | #include "iscsi_target_parameters.h" | 34 | #include "iscsi_target_parameters.h" |
@@ -716,7 +715,7 @@ static int iscsit_add_reject_from_cmd( | |||
716 | */ | 715 | */ |
717 | if (cmd->se_cmd.se_tfo != NULL) { | 716 | if (cmd->se_cmd.se_tfo != NULL) { |
718 | pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); | 717 | pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); |
719 | target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); | 718 | target_put_sess_cmd(&cmd->se_cmd); |
720 | } | 719 | } |
721 | return -1; | 720 | return -1; |
722 | } | 721 | } |
@@ -1002,13 +1001,15 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1002 | hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, | 1001 | hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, |
1003 | conn->cid); | 1002 | conn->cid); |
1004 | 1003 | ||
1005 | target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); | 1004 | target_get_sess_cmd(&cmd->se_cmd, true); |
1006 | 1005 | ||
1007 | cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, | 1006 | cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, |
1008 | scsilun_to_int(&hdr->lun)); | 1007 | scsilun_to_int(&hdr->lun)); |
1009 | if (cmd->sense_reason) | 1008 | if (cmd->sense_reason) |
1010 | goto attach_cmd; | 1009 | goto attach_cmd; |
1011 | 1010 | ||
1011 | /* only used for printks or comparing with ->ref_task_tag */ | ||
1012 | cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; | ||
1012 | cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); | 1013 | cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); |
1013 | if (cmd->sense_reason) { | 1014 | if (cmd->sense_reason) { |
1014 | if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { | 1015 | if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { |
@@ -1068,7 +1069,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1068 | if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) | 1069 | if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) |
1069 | return -1; | 1070 | return -1; |
1070 | else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { | 1071 | else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { |
1071 | target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); | 1072 | target_put_sess_cmd(&cmd->se_cmd); |
1072 | return 0; | 1073 | return 0; |
1073 | } | 1074 | } |
1074 | } | 1075 | } |
@@ -1084,7 +1085,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1084 | if (!cmd->sense_reason) | 1085 | if (!cmd->sense_reason) |
1085 | return 0; | 1086 | return 0; |
1086 | 1087 | ||
1087 | target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); | 1088 | target_put_sess_cmd(&cmd->se_cmd); |
1088 | return 0; | 1089 | return 0; |
1089 | } | 1090 | } |
1090 | 1091 | ||
@@ -1115,7 +1116,6 @@ static int | |||
1115 | iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | 1116 | iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, |
1116 | bool dump_payload) | 1117 | bool dump_payload) |
1117 | { | 1118 | { |
1118 | struct iscsi_conn *conn = cmd->conn; | ||
1119 | int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; | 1119 | int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; |
1120 | /* | 1120 | /* |
1121 | * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. | 1121 | * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. |
@@ -1142,7 +1142,7 @@ after_immediate_data: | |||
1142 | 1142 | ||
1143 | rc = iscsit_dump_data_payload(cmd->conn, | 1143 | rc = iscsit_dump_data_payload(cmd->conn, |
1144 | cmd->first_burst_len, 1); | 1144 | cmd->first_burst_len, 1); |
1145 | target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); | 1145 | target_put_sess_cmd(&cmd->se_cmd); |
1146 | return rc; | 1146 | return rc; |
1147 | } else if (cmd->unsolicited_data) | 1147 | } else if (cmd->unsolicited_data) |
1148 | iscsit_set_unsoliticed_dataout(cmd); | 1148 | iscsit_set_unsoliticed_dataout(cmd); |
@@ -1811,7 +1811,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1811 | conn->sess->se_sess, 0, DMA_NONE, | 1811 | conn->sess->se_sess, 0, DMA_NONE, |
1812 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); | 1812 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); |
1813 | 1813 | ||
1814 | target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); | 1814 | target_get_sess_cmd(&cmd->se_cmd, true); |
1815 | sess_ref = true; | 1815 | sess_ref = true; |
1816 | 1816 | ||
1817 | switch (function) { | 1817 | switch (function) { |
@@ -1953,7 +1953,7 @@ attach: | |||
1953 | */ | 1953 | */ |
1954 | if (sess_ref) { | 1954 | if (sess_ref) { |
1955 | pr_debug("Handle TMR, using sess_ref=true check\n"); | 1955 | pr_debug("Handle TMR, using sess_ref=true check\n"); |
1956 | target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); | 1956 | target_put_sess_cmd(&cmd->se_cmd); |
1957 | } | 1957 | } |
1958 | 1958 | ||
1959 | iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); | 1959 | iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); |
@@ -2737,11 +2737,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2737 | cmd->iov_data_count = iov_count; | 2737 | cmd->iov_data_count = iov_count; |
2738 | cmd->tx_size = tx_size; | 2738 | cmd->tx_size = tx_size; |
2739 | 2739 | ||
2740 | /* sendpage is preferred but can't insert markers */ | 2740 | ret = iscsit_fe_sendpage_sg(cmd, conn); |
2741 | if (!conn->conn_ops->IFMarker) | ||
2742 | ret = iscsit_fe_sendpage_sg(cmd, conn); | ||
2743 | else | ||
2744 | ret = iscsit_send_tx_data(cmd, conn, 0); | ||
2745 | 2741 | ||
2746 | iscsit_unmap_iovec(cmd); | 2742 | iscsit_unmap_iovec(cmd); |
2747 | 2743 | ||
@@ -4073,17 +4069,9 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) | |||
4073 | " opcode while ERL=0, closing iSCSI connection.\n"); | 4069 | " opcode while ERL=0, closing iSCSI connection.\n"); |
4074 | return -1; | 4070 | return -1; |
4075 | } | 4071 | } |
4076 | if (!conn->conn_ops->OFMarker) { | 4072 | pr_err("Unable to recover from unknown opcode while OFMarker=No," |
4077 | pr_err("Unable to recover from unknown" | 4073 | " closing iSCSI connection.\n"); |
4078 | " opcode while OFMarker=No, closing iSCSI" | 4074 | ret = -1; |
4079 | " connection.\n"); | ||
4080 | return -1; | ||
4081 | } | ||
4082 | if (iscsit_recover_from_unknown_opcode(conn) < 0) { | ||
4083 | pr_err("Unable to recover from unknown" | ||
4084 | " opcode, closing iSCSI connection.\n"); | ||
4085 | return -1; | ||
4086 | } | ||
4087 | break; | 4075 | break; |
4088 | } | 4076 | } |
4089 | 4077 | ||
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 469fce44ebad..c1898c84b3d2 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <target/target_core_base.h> | 24 | #include <target/target_core_base.h> |
25 | #include <target/target_core_fabric.h> | 25 | #include <target/target_core_fabric.h> |
26 | #include <target/target_core_fabric_configfs.h> | 26 | #include <target/target_core_fabric_configfs.h> |
27 | #include <target/target_core_configfs.h> | ||
28 | #include <target/configfs_macros.h> | 27 | #include <target/configfs_macros.h> |
29 | #include <target/iscsi/iscsi_transport.h> | 28 | #include <target/iscsi/iscsi_transport.h> |
30 | 29 | ||
@@ -860,57 +859,19 @@ static struct configfs_attribute *lio_target_initiator_attrs[] = { | |||
860 | NULL, | 859 | NULL, |
861 | }; | 860 | }; |
862 | 861 | ||
863 | static struct se_node_acl *lio_tpg_alloc_fabric_acl( | 862 | static int lio_target_init_nodeacl(struct se_node_acl *se_nacl, |
864 | struct se_portal_group *se_tpg) | 863 | const char *name) |
865 | { | 864 | { |
866 | struct iscsi_node_acl *acl; | 865 | struct iscsi_node_acl *acl = |
867 | 866 | container_of(se_nacl, struct iscsi_node_acl, se_node_acl); | |
868 | acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL); | 867 | struct config_group *stats_cg = &se_nacl->acl_fabric_stat_group; |
869 | if (!acl) { | ||
870 | pr_err("Unable to allocate memory for struct iscsi_node_acl\n"); | ||
871 | return NULL; | ||
872 | } | ||
873 | |||
874 | return &acl->se_node_acl; | ||
875 | } | ||
876 | |||
877 | static struct se_node_acl *lio_target_make_nodeacl( | ||
878 | struct se_portal_group *se_tpg, | ||
879 | struct config_group *group, | ||
880 | const char *name) | ||
881 | { | ||
882 | struct config_group *stats_cg; | ||
883 | struct iscsi_node_acl *acl; | ||
884 | struct se_node_acl *se_nacl_new, *se_nacl; | ||
885 | struct iscsi_portal_group *tpg = container_of(se_tpg, | ||
886 | struct iscsi_portal_group, tpg_se_tpg); | ||
887 | u32 cmdsn_depth; | ||
888 | |||
889 | se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg); | ||
890 | if (!se_nacl_new) | ||
891 | return ERR_PTR(-ENOMEM); | ||
892 | |||
893 | cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth; | ||
894 | /* | ||
895 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
896 | * when converting a NdoeACL from demo mode -> explict | ||
897 | */ | ||
898 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | ||
899 | name, cmdsn_depth); | ||
900 | if (IS_ERR(se_nacl)) | ||
901 | return se_nacl; | ||
902 | |||
903 | acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl); | ||
904 | stats_cg = &se_nacl->acl_fabric_stat_group; | ||
905 | 868 | ||
906 | stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, | 869 | stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, |
907 | GFP_KERNEL); | 870 | GFP_KERNEL); |
908 | if (!stats_cg->default_groups) { | 871 | if (!stats_cg->default_groups) { |
909 | pr_err("Unable to allocate memory for" | 872 | pr_err("Unable to allocate memory for" |
910 | " stats_cg->default_groups\n"); | 873 | " stats_cg->default_groups\n"); |
911 | core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1); | 874 | return -ENOMEM; |
912 | kfree(acl); | ||
913 | return ERR_PTR(-ENOMEM); | ||
914 | } | 875 | } |
915 | 876 | ||
916 | stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group; | 877 | stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group; |
@@ -918,13 +879,11 @@ static struct se_node_acl *lio_target_make_nodeacl( | |||
918 | config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group, | 879 | config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group, |
919 | "iscsi_sess_stats", &iscsi_stat_sess_cit); | 880 | "iscsi_sess_stats", &iscsi_stat_sess_cit); |
920 | 881 | ||
921 | return se_nacl; | 882 | return 0; |
922 | } | 883 | } |
923 | 884 | ||
924 | static void lio_target_drop_nodeacl( | 885 | static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl) |
925 | struct se_node_acl *se_nacl) | ||
926 | { | 886 | { |
927 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
928 | struct iscsi_node_acl *acl = container_of(se_nacl, | 887 | struct iscsi_node_acl *acl = container_of(se_nacl, |
929 | struct iscsi_node_acl, se_node_acl); | 888 | struct iscsi_node_acl, se_node_acl); |
930 | struct config_item *df_item; | 889 | struct config_item *df_item; |
@@ -938,9 +897,6 @@ static void lio_target_drop_nodeacl( | |||
938 | config_item_put(df_item); | 897 | config_item_put(df_item); |
939 | } | 898 | } |
940 | kfree(stats_cg->default_groups); | 899 | kfree(stats_cg->default_groups); |
941 | |||
942 | core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1); | ||
943 | kfree(acl); | ||
944 | } | 900 | } |
945 | 901 | ||
946 | /* End items for lio_target_acl_cit */ | 902 | /* End items for lio_target_acl_cit */ |
@@ -1463,8 +1419,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( | |||
1463 | if (!tpg) | 1419 | if (!tpg) |
1464 | return NULL; | 1420 | return NULL; |
1465 | 1421 | ||
1466 | ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg, | 1422 | ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI); |
1467 | tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
1468 | if (ret < 0) | 1423 | if (ret < 0) |
1469 | return NULL; | 1424 | return NULL; |
1470 | 1425 | ||
@@ -1735,14 +1690,6 @@ static char *iscsi_get_fabric_name(void) | |||
1735 | return "iSCSI"; | 1690 | return "iSCSI"; |
1736 | } | 1691 | } |
1737 | 1692 | ||
1738 | static u32 iscsi_get_task_tag(struct se_cmd *se_cmd) | ||
1739 | { | ||
1740 | struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); | ||
1741 | |||
1742 | /* only used for printks or comparism with ->ref_task_tag */ | ||
1743 | return (__force u32)cmd->init_task_tag; | ||
1744 | } | ||
1745 | |||
1746 | static int iscsi_get_cmd_state(struct se_cmd *se_cmd) | 1693 | static int iscsi_get_cmd_state(struct se_cmd *se_cmd) |
1747 | { | 1694 | { |
1748 | struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); | 1695 | struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); |
@@ -1832,78 +1779,58 @@ static void lio_aborted_task(struct se_cmd *se_cmd) | |||
1832 | cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); | 1779 | cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); |
1833 | } | 1780 | } |
1834 | 1781 | ||
1835 | static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) | 1782 | static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg) |
1836 | { | 1783 | { |
1837 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1784 | return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg); |
1785 | } | ||
1838 | 1786 | ||
1839 | return &tpg->tpg_tiqn->tiqn[0]; | 1787 | static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) |
1788 | { | ||
1789 | return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn; | ||
1840 | } | 1790 | } |
1841 | 1791 | ||
1842 | static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg) | 1792 | static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg) |
1843 | { | 1793 | { |
1844 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1794 | return iscsi_tpg(se_tpg)->tpgt; |
1845 | |||
1846 | return tpg->tpgt; | ||
1847 | } | 1795 | } |
1848 | 1796 | ||
1849 | static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) | 1797 | static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) |
1850 | { | 1798 | { |
1851 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1799 | return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth; |
1852 | |||
1853 | return tpg->tpg_attrib.default_cmdsn_depth; | ||
1854 | } | 1800 | } |
1855 | 1801 | ||
1856 | static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) | 1802 | static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) |
1857 | { | 1803 | { |
1858 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1804 | return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls; |
1859 | |||
1860 | return tpg->tpg_attrib.generate_node_acls; | ||
1861 | } | 1805 | } |
1862 | 1806 | ||
1863 | static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) | 1807 | static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) |
1864 | { | 1808 | { |
1865 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1809 | return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls; |
1866 | |||
1867 | return tpg->tpg_attrib.cache_dynamic_acls; | ||
1868 | } | 1810 | } |
1869 | 1811 | ||
1870 | static int lio_tpg_check_demo_mode_write_protect( | 1812 | static int lio_tpg_check_demo_mode_write_protect( |
1871 | struct se_portal_group *se_tpg) | 1813 | struct se_portal_group *se_tpg) |
1872 | { | 1814 | { |
1873 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1815 | return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect; |
1874 | |||
1875 | return tpg->tpg_attrib.demo_mode_write_protect; | ||
1876 | } | 1816 | } |
1877 | 1817 | ||
1878 | static int lio_tpg_check_prod_mode_write_protect( | 1818 | static int lio_tpg_check_prod_mode_write_protect( |
1879 | struct se_portal_group *se_tpg) | 1819 | struct se_portal_group *se_tpg) |
1880 | { | 1820 | { |
1881 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1821 | return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect; |
1882 | |||
1883 | return tpg->tpg_attrib.prod_mode_write_protect; | ||
1884 | } | 1822 | } |
1885 | 1823 | ||
1886 | static int lio_tpg_check_prot_fabric_only( | 1824 | static int lio_tpg_check_prot_fabric_only( |
1887 | struct se_portal_group *se_tpg) | 1825 | struct se_portal_group *se_tpg) |
1888 | { | 1826 | { |
1889 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | ||
1890 | /* | 1827 | /* |
1891 | * Only report fabric_prot_type if t10_pi has also been enabled | 1828 | * Only report fabric_prot_type if t10_pi has also been enabled |
1892 | * for incoming ib_isert sessions. | 1829 | * for incoming ib_isert sessions. |
1893 | */ | 1830 | */ |
1894 | if (!tpg->tpg_attrib.t10_pi) | 1831 | if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi) |
1895 | return 0; | 1832 | return 0; |
1896 | 1833 | return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type; | |
1897 | return tpg->tpg_attrib.fabric_prot_type; | ||
1898 | } | ||
1899 | |||
1900 | static void lio_tpg_release_fabric_acl( | ||
1901 | struct se_portal_group *se_tpg, | ||
1902 | struct se_node_acl *se_acl) | ||
1903 | { | ||
1904 | struct iscsi_node_acl *acl = container_of(se_acl, | ||
1905 | struct iscsi_node_acl, se_node_acl); | ||
1906 | kfree(acl); | ||
1907 | } | 1834 | } |
1908 | 1835 | ||
1909 | /* | 1836 | /* |
@@ -1948,9 +1875,7 @@ static void lio_tpg_close_session(struct se_session *se_sess) | |||
1948 | 1875 | ||
1949 | static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) | 1876 | static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) |
1950 | { | 1877 | { |
1951 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | 1878 | return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index; |
1952 | |||
1953 | return tpg->tpg_tiqn->tiqn_index; | ||
1954 | } | 1879 | } |
1955 | 1880 | ||
1956 | static void lio_set_default_node_attributes(struct se_node_acl *se_acl) | 1881 | static void lio_set_default_node_attributes(struct se_node_acl *se_acl) |
@@ -1967,7 +1892,7 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl) | |||
1967 | 1892 | ||
1968 | static int lio_check_stop_free(struct se_cmd *se_cmd) | 1893 | static int lio_check_stop_free(struct se_cmd *se_cmd) |
1969 | { | 1894 | { |
1970 | return target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 1895 | return target_put_sess_cmd(se_cmd); |
1971 | } | 1896 | } |
1972 | 1897 | ||
1973 | static void lio_release_cmd(struct se_cmd *se_cmd) | 1898 | static void lio_release_cmd(struct se_cmd *se_cmd) |
@@ -1981,14 +1906,11 @@ static void lio_release_cmd(struct se_cmd *se_cmd) | |||
1981 | const struct target_core_fabric_ops iscsi_ops = { | 1906 | const struct target_core_fabric_ops iscsi_ops = { |
1982 | .module = THIS_MODULE, | 1907 | .module = THIS_MODULE, |
1983 | .name = "iscsi", | 1908 | .name = "iscsi", |
1909 | .node_acl_size = sizeof(struct iscsi_node_acl), | ||
1984 | .get_fabric_name = iscsi_get_fabric_name, | 1910 | .get_fabric_name = iscsi_get_fabric_name, |
1985 | .get_fabric_proto_ident = iscsi_get_fabric_proto_ident, | ||
1986 | .tpg_get_wwn = lio_tpg_get_endpoint_wwn, | 1911 | .tpg_get_wwn = lio_tpg_get_endpoint_wwn, |
1987 | .tpg_get_tag = lio_tpg_get_tag, | 1912 | .tpg_get_tag = lio_tpg_get_tag, |
1988 | .tpg_get_default_depth = lio_tpg_get_default_depth, | 1913 | .tpg_get_default_depth = lio_tpg_get_default_depth, |
1989 | .tpg_get_pr_transport_id = iscsi_get_pr_transport_id, | ||
1990 | .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len, | ||
1991 | .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id, | ||
1992 | .tpg_check_demo_mode = lio_tpg_check_demo_mode, | 1914 | .tpg_check_demo_mode = lio_tpg_check_demo_mode, |
1993 | .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache, | 1915 | .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache, |
1994 | .tpg_check_demo_mode_write_protect = | 1916 | .tpg_check_demo_mode_write_protect = |
@@ -1996,8 +1918,6 @@ const struct target_core_fabric_ops iscsi_ops = { | |||
1996 | .tpg_check_prod_mode_write_protect = | 1918 | .tpg_check_prod_mode_write_protect = |
1997 | lio_tpg_check_prod_mode_write_protect, | 1919 | lio_tpg_check_prod_mode_write_protect, |
1998 | .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only, | 1920 | .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only, |
1999 | .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl, | ||
2000 | .tpg_release_fabric_acl = lio_tpg_release_fabric_acl, | ||
2001 | .tpg_get_inst_index = lio_tpg_get_inst_index, | 1921 | .tpg_get_inst_index = lio_tpg_get_inst_index, |
2002 | .check_stop_free = lio_check_stop_free, | 1922 | .check_stop_free = lio_check_stop_free, |
2003 | .release_cmd = lio_release_cmd, | 1923 | .release_cmd = lio_release_cmd, |
@@ -2008,7 +1928,6 @@ const struct target_core_fabric_ops iscsi_ops = { | |||
2008 | .write_pending = lio_write_pending, | 1928 | .write_pending = lio_write_pending, |
2009 | .write_pending_status = lio_write_pending_status, | 1929 | .write_pending_status = lio_write_pending_status, |
2010 | .set_default_node_attributes = lio_set_default_node_attributes, | 1930 | .set_default_node_attributes = lio_set_default_node_attributes, |
2011 | .get_task_tag = iscsi_get_task_tag, | ||
2012 | .get_cmd_state = iscsi_get_cmd_state, | 1931 | .get_cmd_state = iscsi_get_cmd_state, |
2013 | .queue_data_in = lio_queue_data_in, | 1932 | .queue_data_in = lio_queue_data_in, |
2014 | .queue_status = lio_queue_status, | 1933 | .queue_status = lio_queue_status, |
@@ -2020,8 +1939,8 @@ const struct target_core_fabric_ops iscsi_ops = { | |||
2020 | .fabric_drop_tpg = lio_target_tiqn_deltpg, | 1939 | .fabric_drop_tpg = lio_target_tiqn_deltpg, |
2021 | .fabric_make_np = lio_target_call_addnptotpg, | 1940 | .fabric_make_np = lio_target_call_addnptotpg, |
2022 | .fabric_drop_np = lio_target_call_delnpfromtpg, | 1941 | .fabric_drop_np = lio_target_call_delnpfromtpg, |
2023 | .fabric_make_nodeacl = lio_target_make_nodeacl, | 1942 | .fabric_init_nodeacl = lio_target_init_nodeacl, |
2024 | .fabric_drop_nodeacl = lio_target_drop_nodeacl, | 1943 | .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl, |
2025 | 1944 | ||
2026 | .tfc_discovery_attrs = lio_target_discovery_auth_attrs, | 1945 | .tfc_discovery_attrs = lio_target_discovery_auth_attrs, |
2027 | .tfc_wwn_attrs = lio_target_wwn_attrs, | 1946 | .tfc_wwn_attrs = lio_target_wwn_attrs, |
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 959a14c9dd5d..210f6e4830e3 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -956,56 +956,3 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
956 | 956 | ||
957 | iscsit_handle_connection_cleanup(conn); | 957 | iscsit_handle_connection_cleanup(conn); |
958 | } | 958 | } |
959 | |||
960 | /* | ||
961 | * This is the simple function that makes the magic of | ||
962 | * sync and steering happen in the follow paradoxical order: | ||
963 | * | ||
964 | * 0) Receive conn->of_marker (bytes left until next OFMarker) | ||
965 | * bytes into an offload buffer. When we pass the exact number | ||
966 | * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence | ||
967 | * rx_data() will automatically receive the identical u32 marker | ||
968 | * values and store it in conn->of_marker_offset; | ||
969 | * 1) Now conn->of_marker_offset will contain the offset to the start | ||
970 | * of the next iSCSI PDU. Dump these remaining bytes into another | ||
971 | * offload buffer. | ||
972 | * 2) We are done! | ||
973 | * Next byte in the TCP stream will contain the next iSCSI PDU! | ||
974 | * Cool Huh?! | ||
975 | */ | ||
976 | int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn) | ||
977 | { | ||
978 | /* | ||
979 | * Make sure the remaining bytes to next maker is a sane value. | ||
980 | */ | ||
981 | if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) { | ||
982 | pr_err("Remaining bytes to OFMarker: %u exceeds" | ||
983 | " OFMarkInt bytes: %u.\n", conn->of_marker, | ||
984 | conn->conn_ops->OFMarkInt * 4); | ||
985 | return -1; | ||
986 | } | ||
987 | |||
988 | pr_debug("Advancing %u bytes in TCP stream to get to the" | ||
989 | " next OFMarker.\n", conn->of_marker); | ||
990 | |||
991 | if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0) | ||
992 | return -1; | ||
993 | |||
994 | /* | ||
995 | * Make sure the offset marker we retrived is a valid value. | ||
996 | */ | ||
997 | if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) + | ||
998 | conn->conn_ops->MaxRecvDataSegmentLength)) { | ||
999 | pr_err("OfMarker offset value: %u exceeds limit.\n", | ||
1000 | conn->of_marker_offset); | ||
1001 | return -1; | ||
1002 | } | ||
1003 | |||
1004 | pr_debug("Discarding %u bytes of TCP stream to get to the" | ||
1005 | " next iSCSI Opcode.\n", conn->of_marker_offset); | ||
1006 | |||
1007 | if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0) | ||
1008 | return -1; | ||
1009 | |||
1010 | return 0; | ||
1011 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index 21acc9a06376..a9e2f9497fb2 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h | |||
@@ -10,6 +10,5 @@ extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); | |||
10 | extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); | 10 | extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); |
11 | extern void iscsit_fall_back_to_erl0(struct iscsi_session *); | 11 | extern void iscsit_fall_back_to_erl0(struct iscsi_session *); |
12 | extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); | 12 | extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); |
13 | extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *); | ||
14 | 13 | ||
15 | #endif /*** ISCSI_TARGET_ERL0_H ***/ | 14 | #endif /*** ISCSI_TARGET_ERL0_H ***/ |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 70d799dfab03..3d0fe4ff5590 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -410,8 +410,6 @@ static int iscsi_login_zero_tsih_s2( | |||
410 | if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) | 410 | if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) |
411 | return -1; | 411 | return -1; |
412 | 412 | ||
413 | if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) | ||
414 | return -1; | ||
415 | /* | 413 | /* |
416 | * Set RDMAExtensions=Yes by default for iSER enabled network portals | 414 | * Set RDMAExtensions=Yes by default for iSER enabled network portals |
417 | */ | 415 | */ |
@@ -477,59 +475,6 @@ check_prot: | |||
477 | return 0; | 475 | return 0; |
478 | } | 476 | } |
479 | 477 | ||
480 | /* | ||
481 | * Remove PSTATE_NEGOTIATE for the four FIM related keys. | ||
482 | * The Initiator node will be able to enable FIM by proposing them itself. | ||
483 | */ | ||
484 | int iscsi_login_disable_FIM_keys( | ||
485 | struct iscsi_param_list *param_list, | ||
486 | struct iscsi_conn *conn) | ||
487 | { | ||
488 | struct iscsi_param *param; | ||
489 | |||
490 | param = iscsi_find_param_from_key("OFMarker", param_list); | ||
491 | if (!param) { | ||
492 | pr_err("iscsi_find_param_from_key() for" | ||
493 | " OFMarker failed\n"); | ||
494 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
495 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | ||
496 | return -1; | ||
497 | } | ||
498 | param->state &= ~PSTATE_NEGOTIATE; | ||
499 | |||
500 | param = iscsi_find_param_from_key("OFMarkInt", param_list); | ||
501 | if (!param) { | ||
502 | pr_err("iscsi_find_param_from_key() for" | ||
503 | " IFMarker failed\n"); | ||
504 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
505 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | ||
506 | return -1; | ||
507 | } | ||
508 | param->state &= ~PSTATE_NEGOTIATE; | ||
509 | |||
510 | param = iscsi_find_param_from_key("IFMarker", param_list); | ||
511 | if (!param) { | ||
512 | pr_err("iscsi_find_param_from_key() for" | ||
513 | " IFMarker failed\n"); | ||
514 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
515 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | ||
516 | return -1; | ||
517 | } | ||
518 | param->state &= ~PSTATE_NEGOTIATE; | ||
519 | |||
520 | param = iscsi_find_param_from_key("IFMarkInt", param_list); | ||
521 | if (!param) { | ||
522 | pr_err("iscsi_find_param_from_key() for" | ||
523 | " IFMarker failed\n"); | ||
524 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
525 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | ||
526 | return -1; | ||
527 | } | ||
528 | param->state &= ~PSTATE_NEGOTIATE; | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static int iscsi_login_non_zero_tsih_s1( | 478 | static int iscsi_login_non_zero_tsih_s1( |
534 | struct iscsi_conn *conn, | 479 | struct iscsi_conn *conn, |
535 | unsigned char *buf) | 480 | unsigned char *buf) |
@@ -616,7 +561,7 @@ static int iscsi_login_non_zero_tsih_s2( | |||
616 | if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) | 561 | if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) |
617 | return -1; | 562 | return -1; |
618 | 563 | ||
619 | return iscsi_login_disable_FIM_keys(conn->param_list, conn); | 564 | return 0; |
620 | } | 565 | } |
621 | 566 | ||
622 | int iscsi_login_post_auth_non_zero_tsih( | 567 | int iscsi_login_post_auth_non_zero_tsih( |
@@ -765,7 +710,6 @@ int iscsi_post_login_handler( | |||
765 | conn->conn_state = TARG_CONN_STATE_LOGGED_IN; | 710 | conn->conn_state = TARG_CONN_STATE_LOGGED_IN; |
766 | 711 | ||
767 | iscsi_set_connection_parameters(conn->conn_ops, conn->param_list); | 712 | iscsi_set_connection_parameters(conn->conn_ops, conn->param_list); |
768 | iscsit_set_sync_and_steering_values(conn); | ||
769 | /* | 713 | /* |
770 | * SCSI Initiator -> SCSI Target Port Mapping | 714 | * SCSI Initiator -> SCSI Target Port Mapping |
771 | */ | 715 | */ |
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 29d098324b7f..1c7358081533 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h | |||
@@ -16,6 +16,5 @@ extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); | |||
16 | extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, | 16 | extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, |
17 | bool, bool); | 17 | bool, bool); |
18 | extern int iscsi_target_login_thread(void *); | 18 | extern int iscsi_target_login_thread(void *); |
19 | extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *); | ||
20 | 19 | ||
21 | #endif /*** ISCSI_TARGET_LOGIN_H ***/ | 20 | #endif /*** ISCSI_TARGET_LOGIN_H ***/ |
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index d4f9e9645697..e8a52f7d6204 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
@@ -34,13 +34,6 @@ int iscsi_login_rx_data( | |||
34 | iov.iov_len = length; | 34 | iov.iov_len = length; |
35 | iov.iov_base = buf; | 35 | iov.iov_base = buf; |
36 | 36 | ||
37 | /* | ||
38 | * Initial Marker-less Interval. | ||
39 | * Add the values regardless of IFMarker/OFMarker, considering | ||
40 | * it may not be negoitated yet. | ||
41 | */ | ||
42 | conn->of_marker += length; | ||
43 | |||
44 | rx_got = rx_data(conn, &iov, 1, length); | 37 | rx_got = rx_data(conn, &iov, 1, length); |
45 | if (rx_got != length) { | 38 | if (rx_got != length) { |
46 | pr_err("rx_data returned %d, expecting %d.\n", | 39 | pr_err("rx_data returned %d, expecting %d.\n", |
@@ -72,13 +65,6 @@ int iscsi_login_tx_data( | |||
72 | iov_cnt++; | 65 | iov_cnt++; |
73 | } | 66 | } |
74 | 67 | ||
75 | /* | ||
76 | * Initial Marker-less Interval. | ||
77 | * Add the values regardless of IFMarker/OFMarker, considering | ||
78 | * it may not be negoitated yet. | ||
79 | */ | ||
80 | conn->if_marker += length; | ||
81 | |||
82 | tx_sent = tx_data(conn, &iov[0], iov_cnt, length); | 68 | tx_sent = tx_data(conn, &iov[0], iov_cnt, length); |
83 | if (tx_sent != length) { | 69 | if (tx_sent != length) { |
84 | pr_err("tx_data returned %d, expecting %d.\n", | 70 | pr_err("tx_data returned %d, expecting %d.\n", |
@@ -97,12 +83,6 @@ void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops) | |||
97 | "CRC32C" : "None"); | 83 | "CRC32C" : "None"); |
98 | pr_debug("MaxRecvDataSegmentLength: %u\n", | 84 | pr_debug("MaxRecvDataSegmentLength: %u\n", |
99 | conn_ops->MaxRecvDataSegmentLength); | 85 | conn_ops->MaxRecvDataSegmentLength); |
100 | pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No"); | ||
101 | pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No"); | ||
102 | if (conn_ops->OFMarker) | ||
103 | pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt); | ||
104 | if (conn_ops->IFMarker) | ||
105 | pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt); | ||
106 | } | 86 | } |
107 | 87 | ||
108 | void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops) | 88 | void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops) |
@@ -194,10 +174,6 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para | |||
194 | case TYPERANGE_DIGEST: | 174 | case TYPERANGE_DIGEST: |
195 | param->type = TYPE_VALUE_LIST | TYPE_STRING; | 175 | param->type = TYPE_VALUE_LIST | TYPE_STRING; |
196 | break; | 176 | break; |
197 | case TYPERANGE_MARKINT: | ||
198 | param->type = TYPE_NUMBER_RANGE; | ||
199 | param->type_range |= TYPERANGE_1_TO_65535; | ||
200 | break; | ||
201 | case TYPERANGE_ISCSINAME: | 177 | case TYPERANGE_ISCSINAME: |
202 | case TYPERANGE_SESSIONTYPE: | 178 | case TYPERANGE_SESSIONTYPE: |
203 | case TYPERANGE_TARGETADDRESS: | 179 | case TYPERANGE_TARGETADDRESS: |
@@ -422,13 +398,13 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) | |||
422 | 398 | ||
423 | param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT, | 399 | param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT, |
424 | PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, | 400 | PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, |
425 | TYPERANGE_MARKINT, USE_INITIAL_ONLY); | 401 | TYPERANGE_UTF8, USE_INITIAL_ONLY); |
426 | if (!param) | 402 | if (!param) |
427 | goto out; | 403 | goto out; |
428 | 404 | ||
429 | param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT, | 405 | param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT, |
430 | PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, | 406 | PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, |
431 | TYPERANGE_MARKINT, USE_INITIAL_ONLY); | 407 | TYPERANGE_UTF8, USE_INITIAL_ONLY); |
432 | if (!param) | 408 | if (!param) |
433 | goto out; | 409 | goto out; |
434 | /* | 410 | /* |
@@ -524,9 +500,9 @@ int iscsi_set_keys_to_negotiate( | |||
524 | } else if (!strcmp(param->name, OFMARKER)) { | 500 | } else if (!strcmp(param->name, OFMARKER)) { |
525 | SET_PSTATE_NEGOTIATE(param); | 501 | SET_PSTATE_NEGOTIATE(param); |
526 | } else if (!strcmp(param->name, IFMARKINT)) { | 502 | } else if (!strcmp(param->name, IFMARKINT)) { |
527 | SET_PSTATE_NEGOTIATE(param); | 503 | SET_PSTATE_REJECT(param); |
528 | } else if (!strcmp(param->name, OFMARKINT)) { | 504 | } else if (!strcmp(param->name, OFMARKINT)) { |
529 | SET_PSTATE_NEGOTIATE(param); | 505 | SET_PSTATE_REJECT(param); |
530 | } else if (!strcmp(param->name, RDMAEXTENSIONS)) { | 506 | } else if (!strcmp(param->name, RDMAEXTENSIONS)) { |
531 | if (iser) | 507 | if (iser) |
532 | SET_PSTATE_NEGOTIATE(param); | 508 | SET_PSTATE_NEGOTIATE(param); |
@@ -906,91 +882,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt | |||
906 | return 0; | 882 | return 0; |
907 | } | 883 | } |
908 | 884 | ||
909 | static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value) | ||
910 | { | ||
911 | char *left_val_ptr = NULL, *right_val_ptr = NULL; | ||
912 | char *tilde_ptr = NULL; | ||
913 | u32 left_val, right_val, local_left_val; | ||
914 | |||
915 | if (strcmp(param->name, IFMARKINT) && | ||
916 | strcmp(param->name, OFMARKINT)) { | ||
917 | pr_err("Only parameters \"%s\" or \"%s\" may contain a" | ||
918 | " numerical range value.\n", IFMARKINT, OFMARKINT); | ||
919 | return -1; | ||
920 | } | ||
921 | |||
922 | if (IS_PSTATE_PROPOSER(param)) | ||
923 | return 0; | ||
924 | |||
925 | tilde_ptr = strchr(value, '~'); | ||
926 | if (!tilde_ptr) { | ||
927 | pr_err("Unable to locate numerical range indicator" | ||
928 | " \"~\" for \"%s\".\n", param->name); | ||
929 | return -1; | ||
930 | } | ||
931 | *tilde_ptr = '\0'; | ||
932 | |||
933 | left_val_ptr = value; | ||
934 | right_val_ptr = value + strlen(left_val_ptr) + 1; | ||
935 | |||
936 | if (iscsi_check_numerical_value(param, left_val_ptr) < 0) | ||
937 | return -1; | ||
938 | if (iscsi_check_numerical_value(param, right_val_ptr) < 0) | ||
939 | return -1; | ||
940 | |||
941 | left_val = simple_strtoul(left_val_ptr, NULL, 0); | ||
942 | right_val = simple_strtoul(right_val_ptr, NULL, 0); | ||
943 | *tilde_ptr = '~'; | ||
944 | |||
945 | if (right_val < left_val) { | ||
946 | pr_err("Numerical range for parameter \"%s\" contains" | ||
947 | " a right value which is less than the left.\n", | ||
948 | param->name); | ||
949 | return -1; | ||
950 | } | ||
951 | |||
952 | /* | ||
953 | * For now, enforce reasonable defaults for [I,O]FMarkInt. | ||
954 | */ | ||
955 | tilde_ptr = strchr(param->value, '~'); | ||
956 | if (!tilde_ptr) { | ||
957 | pr_err("Unable to locate numerical range indicator" | ||
958 | " \"~\" for \"%s\".\n", param->name); | ||
959 | return -1; | ||
960 | } | ||
961 | *tilde_ptr = '\0'; | ||
962 | |||
963 | left_val_ptr = param->value; | ||
964 | right_val_ptr = param->value + strlen(left_val_ptr) + 1; | ||
965 | |||
966 | local_left_val = simple_strtoul(left_val_ptr, NULL, 0); | ||
967 | *tilde_ptr = '~'; | ||
968 | |||
969 | if (param->set_param) { | ||
970 | if ((left_val < local_left_val) || | ||
971 | (right_val < local_left_val)) { | ||
972 | pr_err("Passed value range \"%u~%u\" is below" | ||
973 | " minimum left value \"%u\" for key \"%s\"," | ||
974 | " rejecting.\n", left_val, right_val, | ||
975 | local_left_val, param->name); | ||
976 | return -1; | ||
977 | } | ||
978 | } else { | ||
979 | if ((left_val < local_left_val) && | ||
980 | (right_val < local_left_val)) { | ||
981 | pr_err("Received value range \"%u~%u\" is" | ||
982 | " below minimum left value \"%u\" for key" | ||
983 | " \"%s\", rejecting.\n", left_val, right_val, | ||
984 | local_left_val, param->name); | ||
985 | SET_PSTATE_REJECT(param); | ||
986 | if (iscsi_update_param_value(param, REJECT) < 0) | ||
987 | return -1; | ||
988 | } | ||
989 | } | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value) | 885 | static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value) |
995 | { | 886 | { |
996 | if (IS_PSTATE_PROPOSER(param)) | 887 | if (IS_PSTATE_PROPOSER(param)) |
@@ -1027,33 +918,6 @@ static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *val | |||
1027 | return 0; | 918 | return 0; |
1028 | } | 919 | } |
1029 | 920 | ||
1030 | /* | ||
1031 | * This function is used to pick a value range number, currently just | ||
1032 | * returns the lesser of both right values. | ||
1033 | */ | ||
1034 | static char *iscsi_get_value_from_number_range( | ||
1035 | struct iscsi_param *param, | ||
1036 | char *value) | ||
1037 | { | ||
1038 | char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL; | ||
1039 | u32 acceptor_right_value, proposer_right_value; | ||
1040 | |||
1041 | tilde_ptr1 = strchr(value, '~'); | ||
1042 | if (!tilde_ptr1) | ||
1043 | return NULL; | ||
1044 | *tilde_ptr1++ = '\0'; | ||
1045 | proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0); | ||
1046 | |||
1047 | tilde_ptr2 = strchr(param->value, '~'); | ||
1048 | if (!tilde_ptr2) | ||
1049 | return NULL; | ||
1050 | *tilde_ptr2++ = '\0'; | ||
1051 | acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0); | ||
1052 | |||
1053 | return (acceptor_right_value >= proposer_right_value) ? | ||
1054 | tilde_ptr1 : tilde_ptr2; | ||
1055 | } | ||
1056 | |||
1057 | static char *iscsi_check_valuelist_for_support( | 921 | static char *iscsi_check_valuelist_for_support( |
1058 | struct iscsi_param *param, | 922 | struct iscsi_param *param, |
1059 | char *value) | 923 | char *value) |
@@ -1103,7 +967,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value, | |||
1103 | struct iscsi_conn *conn) | 967 | struct iscsi_conn *conn) |
1104 | { | 968 | { |
1105 | u8 acceptor_boolean_value = 0, proposer_boolean_value = 0; | 969 | u8 acceptor_boolean_value = 0, proposer_boolean_value = 0; |
1106 | char *negoitated_value = NULL; | 970 | char *negotiated_value = NULL; |
1107 | 971 | ||
1108 | if (IS_PSTATE_ACCEPTOR(param)) { | 972 | if (IS_PSTATE_ACCEPTOR(param)) { |
1109 | pr_err("Received key \"%s\" twice, protocol error.\n", | 973 | pr_err("Received key \"%s\" twice, protocol error.\n", |
@@ -1203,24 +1067,16 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value, | |||
1203 | pr_debug("Updated %s to target MXDSL value: %s\n", | 1067 | pr_debug("Updated %s to target MXDSL value: %s\n", |
1204 | param->name, param->value); | 1068 | param->name, param->value); |
1205 | } | 1069 | } |
1206 | |||
1207 | } else if (IS_TYPE_NUMBER_RANGE(param)) { | ||
1208 | negoitated_value = iscsi_get_value_from_number_range( | ||
1209 | param, value); | ||
1210 | if (!negoitated_value) | ||
1211 | return -1; | ||
1212 | if (iscsi_update_param_value(param, negoitated_value) < 0) | ||
1213 | return -1; | ||
1214 | } else if (IS_TYPE_VALUE_LIST(param)) { | 1070 | } else if (IS_TYPE_VALUE_LIST(param)) { |
1215 | negoitated_value = iscsi_check_valuelist_for_support( | 1071 | negotiated_value = iscsi_check_valuelist_for_support( |
1216 | param, value); | 1072 | param, value); |
1217 | if (!negoitated_value) { | 1073 | if (!negotiated_value) { |
1218 | pr_err("Proposer's value list \"%s\" contains" | 1074 | pr_err("Proposer's value list \"%s\" contains" |
1219 | " no valid values from Acceptor's value list" | 1075 | " no valid values from Acceptor's value list" |
1220 | " \"%s\".\n", value, param->value); | 1076 | " \"%s\".\n", value, param->value); |
1221 | return -1; | 1077 | return -1; |
1222 | } | 1078 | } |
1223 | if (iscsi_update_param_value(param, negoitated_value) < 0) | 1079 | if (iscsi_update_param_value(param, negotiated_value) < 0) |
1224 | return -1; | 1080 | return -1; |
1225 | } else if (IS_PHASE_DECLARATIVE(param)) { | 1081 | } else if (IS_PHASE_DECLARATIVE(param)) { |
1226 | if (iscsi_update_param_value(param, value) < 0) | 1082 | if (iscsi_update_param_value(param, value) < 0) |
@@ -1239,47 +1095,7 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value) | |||
1239 | return -1; | 1095 | return -1; |
1240 | } | 1096 | } |
1241 | 1097 | ||
1242 | if (IS_TYPE_NUMBER_RANGE(param)) { | 1098 | if (IS_TYPE_VALUE_LIST(param)) { |
1243 | u32 left_val = 0, right_val = 0, recieved_value = 0; | ||
1244 | char *left_val_ptr = NULL, *right_val_ptr = NULL; | ||
1245 | char *tilde_ptr = NULL; | ||
1246 | |||
1247 | if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) { | ||
1248 | if (iscsi_update_param_value(param, value) < 0) | ||
1249 | return -1; | ||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | tilde_ptr = strchr(value, '~'); | ||
1254 | if (tilde_ptr) { | ||
1255 | pr_err("Illegal \"~\" in response for \"%s\".\n", | ||
1256 | param->name); | ||
1257 | return -1; | ||
1258 | } | ||
1259 | tilde_ptr = strchr(param->value, '~'); | ||
1260 | if (!tilde_ptr) { | ||
1261 | pr_err("Unable to locate numerical range" | ||
1262 | " indicator \"~\" for \"%s\".\n", param->name); | ||
1263 | return -1; | ||
1264 | } | ||
1265 | *tilde_ptr = '\0'; | ||
1266 | |||
1267 | left_val_ptr = param->value; | ||
1268 | right_val_ptr = param->value + strlen(left_val_ptr) + 1; | ||
1269 | left_val = simple_strtoul(left_val_ptr, NULL, 0); | ||
1270 | right_val = simple_strtoul(right_val_ptr, NULL, 0); | ||
1271 | recieved_value = simple_strtoul(value, NULL, 0); | ||
1272 | |||
1273 | *tilde_ptr = '~'; | ||
1274 | |||
1275 | if ((recieved_value < left_val) || | ||
1276 | (recieved_value > right_val)) { | ||
1277 | pr_err("Illegal response \"%s=%u\", value must" | ||
1278 | " be between %u and %u.\n", param->name, | ||
1279 | recieved_value, left_val, right_val); | ||
1280 | return -1; | ||
1281 | } | ||
1282 | } else if (IS_TYPE_VALUE_LIST(param)) { | ||
1283 | char *comma_ptr = NULL, *tmp_ptr = NULL; | 1099 | char *comma_ptr = NULL, *tmp_ptr = NULL; |
1284 | 1100 | ||
1285 | comma_ptr = strchr(value, ','); | 1101 | comma_ptr = strchr(value, ','); |
@@ -1361,9 +1177,6 @@ static int iscsi_check_value(struct iscsi_param *param, char *value) | |||
1361 | } else if (IS_TYPE_NUMBER(param)) { | 1177 | } else if (IS_TYPE_NUMBER(param)) { |
1362 | if (iscsi_check_numerical_value(param, value) < 0) | 1178 | if (iscsi_check_numerical_value(param, value) < 0) |
1363 | return -1; | 1179 | return -1; |
1364 | } else if (IS_TYPE_NUMBER_RANGE(param)) { | ||
1365 | if (iscsi_check_numerical_range_value(param, value) < 0) | ||
1366 | return -1; | ||
1367 | } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) { | 1180 | } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) { |
1368 | if (iscsi_check_string_or_list_value(param, value) < 0) | 1181 | if (iscsi_check_string_or_list_value(param, value) < 0) |
1369 | return -1; | 1182 | return -1; |
@@ -1483,8 +1296,6 @@ static int iscsi_enforce_integrity_rules( | |||
1483 | char *tmpptr; | 1296 | char *tmpptr; |
1484 | u8 DataSequenceInOrder = 0; | 1297 | u8 DataSequenceInOrder = 0; |
1485 | u8 ErrorRecoveryLevel = 0, SessionType = 0; | 1298 | u8 ErrorRecoveryLevel = 0, SessionType = 0; |
1486 | u8 IFMarker = 0, OFMarker = 0; | ||
1487 | u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1; | ||
1488 | u32 FirstBurstLength = 0, MaxBurstLength = 0; | 1299 | u32 FirstBurstLength = 0, MaxBurstLength = 0; |
1489 | struct iscsi_param *param = NULL; | 1300 | struct iscsi_param *param = NULL; |
1490 | 1301 | ||
@@ -1503,28 +1314,12 @@ static int iscsi_enforce_integrity_rules( | |||
1503 | if (!strcmp(param->name, MAXBURSTLENGTH)) | 1314 | if (!strcmp(param->name, MAXBURSTLENGTH)) |
1504 | MaxBurstLength = simple_strtoul(param->value, | 1315 | MaxBurstLength = simple_strtoul(param->value, |
1505 | &tmpptr, 0); | 1316 | &tmpptr, 0); |
1506 | if (!strcmp(param->name, IFMARKER)) | ||
1507 | if (!strcmp(param->value, YES)) | ||
1508 | IFMarker = 1; | ||
1509 | if (!strcmp(param->name, OFMARKER)) | ||
1510 | if (!strcmp(param->value, YES)) | ||
1511 | OFMarker = 1; | ||
1512 | if (!strcmp(param->name, IFMARKINT)) | ||
1513 | if (!strcmp(param->value, REJECT)) | ||
1514 | IFMarkInt_Reject = 1; | ||
1515 | if (!strcmp(param->name, OFMARKINT)) | ||
1516 | if (!strcmp(param->value, REJECT)) | ||
1517 | OFMarkInt_Reject = 1; | ||
1518 | } | 1317 | } |
1519 | 1318 | ||
1520 | list_for_each_entry(param, ¶m_list->param_list, p_list) { | 1319 | list_for_each_entry(param, ¶m_list->param_list, p_list) { |
1521 | if (!(param->phase & phase)) | 1320 | if (!(param->phase & phase)) |
1522 | continue; | 1321 | continue; |
1523 | if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) && | 1322 | if (!SessionType && !IS_PSTATE_ACCEPTOR(param)) |
1524 | (strcmp(param->name, IFMARKER) && | ||
1525 | strcmp(param->name, OFMARKER) && | ||
1526 | strcmp(param->name, IFMARKINT) && | ||
1527 | strcmp(param->name, OFMARKINT)))) | ||
1528 | continue; | 1323 | continue; |
1529 | if (!strcmp(param->name, MAXOUTSTANDINGR2T) && | 1324 | if (!strcmp(param->name, MAXOUTSTANDINGR2T) && |
1530 | DataSequenceInOrder && (ErrorRecoveryLevel > 0)) { | 1325 | DataSequenceInOrder && (ErrorRecoveryLevel > 0)) { |
@@ -1556,38 +1351,6 @@ static int iscsi_enforce_integrity_rules( | |||
1556 | param->name, param->value); | 1351 | param->name, param->value); |
1557 | } | 1352 | } |
1558 | } | 1353 | } |
1559 | if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) { | ||
1560 | if (iscsi_update_param_value(param, NO) < 0) | ||
1561 | return -1; | ||
1562 | IFMarker = 0; | ||
1563 | pr_debug("Reset \"%s\" to \"%s\".\n", | ||
1564 | param->name, param->value); | ||
1565 | } | ||
1566 | if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) { | ||
1567 | if (iscsi_update_param_value(param, NO) < 0) | ||
1568 | return -1; | ||
1569 | OFMarker = 0; | ||
1570 | pr_debug("Reset \"%s\" to \"%s\".\n", | ||
1571 | param->name, param->value); | ||
1572 | } | ||
1573 | if (!strcmp(param->name, IFMARKINT) && !IFMarker) { | ||
1574 | if (!strcmp(param->value, REJECT)) | ||
1575 | continue; | ||
1576 | param->state &= ~PSTATE_NEGOTIATE; | ||
1577 | if (iscsi_update_param_value(param, IRRELEVANT) < 0) | ||
1578 | return -1; | ||
1579 | pr_debug("Reset \"%s\" to \"%s\".\n", | ||
1580 | param->name, param->value); | ||
1581 | } | ||
1582 | if (!strcmp(param->name, OFMARKINT) && !OFMarker) { | ||
1583 | if (!strcmp(param->value, REJECT)) | ||
1584 | continue; | ||
1585 | param->state &= ~PSTATE_NEGOTIATE; | ||
1586 | if (iscsi_update_param_value(param, IRRELEVANT) < 0) | ||
1587 | return -1; | ||
1588 | pr_debug("Reset \"%s\" to \"%s\".\n", | ||
1589 | param->name, param->value); | ||
1590 | } | ||
1591 | } | 1354 | } |
1592 | 1355 | ||
1593 | return 0; | 1356 | return 0; |
@@ -1824,24 +1587,6 @@ void iscsi_set_connection_parameters( | |||
1824 | */ | 1587 | */ |
1825 | pr_debug("MaxRecvDataSegmentLength: %u\n", | 1588 | pr_debug("MaxRecvDataSegmentLength: %u\n", |
1826 | ops->MaxRecvDataSegmentLength); | 1589 | ops->MaxRecvDataSegmentLength); |
1827 | } else if (!strcmp(param->name, OFMARKER)) { | ||
1828 | ops->OFMarker = !strcmp(param->value, YES); | ||
1829 | pr_debug("OFMarker: %s\n", | ||
1830 | param->value); | ||
1831 | } else if (!strcmp(param->name, IFMARKER)) { | ||
1832 | ops->IFMarker = !strcmp(param->value, YES); | ||
1833 | pr_debug("IFMarker: %s\n", | ||
1834 | param->value); | ||
1835 | } else if (!strcmp(param->name, OFMARKINT)) { | ||
1836 | ops->OFMarkInt = | ||
1837 | simple_strtoul(param->value, &tmpptr, 0); | ||
1838 | pr_debug("OFMarkInt: %s\n", | ||
1839 | param->value); | ||
1840 | } else if (!strcmp(param->name, IFMARKINT)) { | ||
1841 | ops->IFMarkInt = | ||
1842 | simple_strtoul(param->value, &tmpptr, 0); | ||
1843 | pr_debug("IFMarkInt: %s\n", | ||
1844 | param->value); | ||
1845 | } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { | 1590 | } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { |
1846 | ops->InitiatorRecvDataSegmentLength = | 1591 | ops->InitiatorRecvDataSegmentLength = |
1847 | simple_strtoul(param->value, &tmpptr, 0); | 1592 | simple_strtoul(param->value, &tmpptr, 0); |
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index a47046a752aa..a0751e3f0813 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h | |||
@@ -138,8 +138,8 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, | |||
138 | #define INITIAL_SESSIONTYPE NORMAL | 138 | #define INITIAL_SESSIONTYPE NORMAL |
139 | #define INITIAL_IFMARKER NO | 139 | #define INITIAL_IFMARKER NO |
140 | #define INITIAL_OFMARKER NO | 140 | #define INITIAL_OFMARKER NO |
141 | #define INITIAL_IFMARKINT "2048~65535" | 141 | #define INITIAL_IFMARKINT REJECT |
142 | #define INITIAL_OFMARKINT "2048~65535" | 142 | #define INITIAL_OFMARKINT REJECT |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Initial values for iSER parameters following RFC-5046 Section 6 | 145 | * Initial values for iSER parameters following RFC-5046 Section 6 |
@@ -239,10 +239,9 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, | |||
239 | #define TYPERANGE_AUTH 0x0200 | 239 | #define TYPERANGE_AUTH 0x0200 |
240 | #define TYPERANGE_DIGEST 0x0400 | 240 | #define TYPERANGE_DIGEST 0x0400 |
241 | #define TYPERANGE_ISCSINAME 0x0800 | 241 | #define TYPERANGE_ISCSINAME 0x0800 |
242 | #define TYPERANGE_MARKINT 0x1000 | 242 | #define TYPERANGE_SESSIONTYPE 0x1000 |
243 | #define TYPERANGE_SESSIONTYPE 0x2000 | 243 | #define TYPERANGE_TARGETADDRESS 0x2000 |
244 | #define TYPERANGE_TARGETADDRESS 0x4000 | 244 | #define TYPERANGE_UTF8 0x4000 |
245 | #define TYPERANGE_UTF8 0x8000 | ||
246 | 245 | ||
247 | #define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2) | 246 | #define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2) |
248 | #define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600) | 247 | #define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600) |
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index fe9a582ca6af..cf59c397007b 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c | |||
@@ -120,7 +120,7 @@ u8 iscsit_tmr_task_reassign( | |||
120 | struct iscsi_tmr_req *tmr_req = cmd->tmr_req; | 120 | struct iscsi_tmr_req *tmr_req = cmd->tmr_req; |
121 | struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; | 121 | struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; |
122 | struct iscsi_tm *hdr = (struct iscsi_tm *) buf; | 122 | struct iscsi_tm *hdr = (struct iscsi_tm *) buf; |
123 | int ret, ref_lun; | 123 | u64 ret, ref_lun; |
124 | 124 | ||
125 | pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x," | 125 | pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x," |
126 | " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n", | 126 | " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n", |
@@ -164,7 +164,7 @@ u8 iscsit_tmr_task_reassign( | |||
164 | ref_lun = scsilun_to_int(&hdr->lun); | 164 | ref_lun = scsilun_to_int(&hdr->lun); |
165 | if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) { | 165 | if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) { |
166 | pr_err("Unable to perform connection recovery for" | 166 | pr_err("Unable to perform connection recovery for" |
167 | " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n", | 167 | " differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n", |
168 | ref_lun, ref_cmd->se_cmd.orig_fe_lun); | 168 | ref_lun, ref_cmd->se_cmd.orig_fe_lun); |
169 | return ISCSI_TMF_RSP_REJECTED; | 169 | return ISCSI_TMF_RSP_REJECTED; |
170 | } | 170 | } |
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 5e3295fe404d..968068ffcb1c 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <target/target_core_base.h> | 19 | #include <target/target_core_base.h> |
20 | #include <target/target_core_fabric.h> | 20 | #include <target/target_core_fabric.h> |
21 | #include <target/target_core_configfs.h> | ||
22 | 21 | ||
23 | #include <target/iscsi/iscsi_target_core.h> | 22 | #include <target/iscsi/iscsi_target_core.h> |
24 | #include "iscsi_target_erl0.h" | 23 | #include "iscsi_target_erl0.h" |
@@ -67,9 +66,12 @@ int iscsit_load_discovery_tpg(void) | |||
67 | pr_err("Unable to allocate struct iscsi_portal_group\n"); | 66 | pr_err("Unable to allocate struct iscsi_portal_group\n"); |
68 | return -1; | 67 | return -1; |
69 | } | 68 | } |
70 | 69 | /* | |
71 | ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg, | 70 | * Save iscsi_ops pointer for special case discovery TPG that |
72 | tpg, TRANSPORT_TPG_TYPE_DISCOVERY); | 71 | * doesn't exist as se_wwn->wwn_group within configfs. |
72 | */ | ||
73 | tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops; | ||
74 | ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1); | ||
73 | if (ret < 0) { | 75 | if (ret < 0) { |
74 | kfree(tpg); | 76 | kfree(tpg); |
75 | return -1; | 77 | return -1; |
@@ -280,8 +282,6 @@ int iscsit_tpg_del_portal_group( | |||
280 | return -EPERM; | 282 | return -EPERM; |
281 | } | 283 | } |
282 | 284 | ||
283 | core_tpg_clear_object_luns(&tpg->tpg_se_tpg); | ||
284 | |||
285 | if (tpg->param_list) { | 285 | if (tpg->param_list) { |
286 | iscsi_release_param_list(tpg->param_list); | 286 | iscsi_release_param_list(tpg->param_list); |
287 | tpg->param_list = NULL; | 287 | tpg->param_list = NULL; |
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index b18edda3e8af..a2bff0702eb2 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <scsi/iscsi_proto.h> | 22 | #include <scsi/iscsi_proto.h> |
23 | #include <target/target_core_base.h> | 23 | #include <target/target_core_base.h> |
24 | #include <target/target_core_fabric.h> | 24 | #include <target/target_core_fabric.h> |
25 | #include <target/target_core_configfs.h> | ||
26 | #include <target/iscsi/iscsi_transport.h> | 25 | #include <target/iscsi/iscsi_transport.h> |
27 | 26 | ||
28 | #include <target/iscsi/iscsi_target_core.h> | 27 | #include <target/iscsi/iscsi_target_core.h> |
@@ -746,7 +745,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) | |||
746 | rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); | 745 | rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); |
747 | if (!rc && shutdown && se_cmd && se_cmd->se_sess) { | 746 | if (!rc && shutdown && se_cmd && se_cmd->se_sess) { |
748 | __iscsit_free_cmd(cmd, true, shutdown); | 747 | __iscsit_free_cmd(cmd, true, shutdown); |
749 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 748 | target_put_sess_cmd(se_cmd); |
750 | } | 749 | } |
751 | break; | 750 | break; |
752 | case ISCSI_OP_REJECT: | 751 | case ISCSI_OP_REJECT: |
@@ -762,7 +761,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) | |||
762 | rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); | 761 | rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); |
763 | if (!rc && shutdown && se_cmd->se_sess) { | 762 | if (!rc && shutdown && se_cmd->se_sess) { |
764 | __iscsit_free_cmd(cmd, true, shutdown); | 763 | __iscsit_free_cmd(cmd, true, shutdown); |
765 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 764 | target_put_sess_cmd(se_cmd); |
766 | } | 765 | } |
767 | break; | 766 | break; |
768 | } | 767 | } |
@@ -809,54 +808,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess) | |||
809 | spin_unlock_bh(&sess->session_usage_lock); | 808 | spin_unlock_bh(&sess->session_usage_lock); |
810 | } | 809 | } |
811 | 810 | ||
812 | /* | ||
813 | * Setup conn->if_marker and conn->of_marker values based upon | ||
814 | * the initial marker-less interval. (see iSCSI v19 A.2) | ||
815 | */ | ||
816 | int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn) | ||
817 | { | ||
818 | int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0; | ||
819 | /* | ||
820 | * IFMarkInt and OFMarkInt are negotiated as 32-bit words. | ||
821 | */ | ||
822 | u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4); | ||
823 | u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4); | ||
824 | |||
825 | if (conn->conn_ops->OFMarker) { | ||
826 | /* | ||
827 | * Account for the first Login Command received not | ||
828 | * via iscsi_recv_msg(). | ||
829 | */ | ||
830 | conn->of_marker += ISCSI_HDR_LEN; | ||
831 | if (conn->of_marker <= OFMarkInt) { | ||
832 | conn->of_marker = (OFMarkInt - conn->of_marker); | ||
833 | } else { | ||
834 | login_ofmarker_count = (conn->of_marker / OFMarkInt); | ||
835 | next_marker = (OFMarkInt * (login_ofmarker_count + 1)) + | ||
836 | (login_ofmarker_count * MARKER_SIZE); | ||
837 | conn->of_marker = (next_marker - conn->of_marker); | ||
838 | } | ||
839 | conn->of_marker_offset = 0; | ||
840 | pr_debug("Setting OFMarker value to %u based on Initial" | ||
841 | " Markerless Interval.\n", conn->of_marker); | ||
842 | } | ||
843 | |||
844 | if (conn->conn_ops->IFMarker) { | ||
845 | if (conn->if_marker <= IFMarkInt) { | ||
846 | conn->if_marker = (IFMarkInt - conn->if_marker); | ||
847 | } else { | ||
848 | login_ifmarker_count = (conn->if_marker / IFMarkInt); | ||
849 | next_marker = (IFMarkInt * (login_ifmarker_count + 1)) + | ||
850 | (login_ifmarker_count * MARKER_SIZE); | ||
851 | conn->if_marker = (next_marker - conn->if_marker); | ||
852 | } | ||
853 | pr_debug("Setting IFMarker value to %u based on Initial" | ||
854 | " Markerless Interval.\n", conn->if_marker); | ||
855 | } | ||
856 | |||
857 | return 0; | ||
858 | } | ||
859 | |||
860 | struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid) | 811 | struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid) |
861 | { | 812 | { |
862 | struct iscsi_conn *conn; | 813 | struct iscsi_conn *conn; |
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 1ab754a671ff..995f1cb29d0e 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h | |||
@@ -34,7 +34,6 @@ extern void iscsit_free_cmd(struct iscsi_cmd *, bool); | |||
34 | extern int iscsit_check_session_usage_count(struct iscsi_session *); | 34 | extern int iscsit_check_session_usage_count(struct iscsi_session *); |
35 | extern void iscsit_dec_session_usage_count(struct iscsi_session *); | 35 | extern void iscsit_dec_session_usage_count(struct iscsi_session *); |
36 | extern void iscsit_inc_session_usage_count(struct iscsi_session *); | 36 | extern void iscsit_inc_session_usage_count(struct iscsi_session *); |
37 | extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *); | ||
38 | extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16); | 37 | extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16); |
39 | extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16); | 38 | extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16); |
40 | extern void iscsit_check_conn_usage_count(struct iscsi_conn *); | 39 | extern void iscsit_check_conn_usage_count(struct iscsi_conn *); |
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 51f0c895c6a5..a556bdebd775 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -35,14 +35,11 @@ | |||
35 | #include <target/target_core_base.h> | 35 | #include <target/target_core_base.h> |
36 | #include <target/target_core_fabric.h> | 36 | #include <target/target_core_fabric.h> |
37 | #include <target/target_core_fabric_configfs.h> | 37 | #include <target/target_core_fabric_configfs.h> |
38 | #include <target/target_core_configfs.h> | ||
39 | 38 | ||
40 | #include "tcm_loop.h" | 39 | #include "tcm_loop.h" |
41 | 40 | ||
42 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) | 41 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) |
43 | 42 | ||
44 | static const struct target_core_fabric_ops loop_ops; | ||
45 | |||
46 | static struct workqueue_struct *tcm_loop_workqueue; | 43 | static struct workqueue_struct *tcm_loop_workqueue; |
47 | static struct kmem_cache *tcm_loop_cmd_cache; | 44 | static struct kmem_cache *tcm_loop_cmd_cache; |
48 | 45 | ||
@@ -165,6 +162,7 @@ static void tcm_loop_submission_work(struct work_struct *work) | |||
165 | transfer_length = scsi_bufflen(sc); | 162 | transfer_length = scsi_bufflen(sc); |
166 | } | 163 | } |
167 | 164 | ||
165 | se_cmd->tag = tl_cmd->sc_cmd_tag; | ||
168 | rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, | 166 | rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, |
169 | &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, | 167 | &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, |
170 | transfer_length, TCM_SIMPLE_TAG, | 168 | transfer_length, TCM_SIMPLE_TAG, |
@@ -217,7 +215,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) | |||
217 | * to struct scsi_device | 215 | * to struct scsi_device |
218 | */ | 216 | */ |
219 | static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, | 217 | static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, |
220 | int lun, int task, enum tcm_tmreq_table tmr) | 218 | u64 lun, int task, enum tcm_tmreq_table tmr) |
221 | { | 219 | { |
222 | struct se_cmd *se_cmd = NULL; | 220 | struct se_cmd *se_cmd = NULL; |
223 | struct se_session *se_sess; | 221 | struct se_session *se_sess; |
@@ -409,7 +407,7 @@ static int tcm_loop_driver_probe(struct device *dev) | |||
409 | sh->max_id = 2; | 407 | sh->max_id = 2; |
410 | sh->max_lun = 0; | 408 | sh->max_lun = 0; |
411 | sh->max_channel = 0; | 409 | sh->max_channel = 0; |
412 | sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; | 410 | sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; |
413 | 411 | ||
414 | host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | | 412 | host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | |
415 | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | | 413 | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | |
@@ -520,147 +518,26 @@ static char *tcm_loop_get_fabric_name(void) | |||
520 | return "loopback"; | 518 | return "loopback"; |
521 | } | 519 | } |
522 | 520 | ||
523 | static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) | 521 | static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg) |
524 | { | 522 | { |
525 | struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; | 523 | return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); |
526 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
527 | /* | ||
528 | * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba() | ||
529 | * time based on the protocol dependent prefix of the passed configfs group. | ||
530 | * | ||
531 | * Based upon tl_proto_id, TCM_Loop emulates the requested fabric | ||
532 | * ProtocolID using target_core_fabric_lib.c symbols. | ||
533 | */ | ||
534 | switch (tl_hba->tl_proto_id) { | ||
535 | case SCSI_PROTOCOL_SAS: | ||
536 | return sas_get_fabric_proto_ident(se_tpg); | ||
537 | case SCSI_PROTOCOL_FCP: | ||
538 | return fc_get_fabric_proto_ident(se_tpg); | ||
539 | case SCSI_PROTOCOL_ISCSI: | ||
540 | return iscsi_get_fabric_proto_ident(se_tpg); | ||
541 | default: | ||
542 | pr_err("Unknown tl_proto_id: 0x%02x, using" | ||
543 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
544 | break; | ||
545 | } | ||
546 | |||
547 | return sas_get_fabric_proto_ident(se_tpg); | ||
548 | } | 524 | } |
549 | 525 | ||
550 | static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) | 526 | static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) |
551 | { | 527 | { |
552 | struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; | ||
553 | /* | 528 | /* |
554 | * Return the passed NAA identifier for the SAS Target Port | 529 | * Return the passed NAA identifier for the SAS Target Port |
555 | */ | 530 | */ |
556 | return &tl_tpg->tl_hba->tl_wwn_address[0]; | 531 | return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0]; |
557 | } | 532 | } |
558 | 533 | ||
559 | static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) | 534 | static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) |
560 | { | 535 | { |
561 | struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; | ||
562 | /* | 536 | /* |
563 | * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 | 537 | * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 |
564 | * to represent the SCSI Target Port. | 538 | * to represent the SCSI Target Port. |
565 | */ | 539 | */ |
566 | return tl_tpg->tl_tpgt; | 540 | return tl_tpg(se_tpg)->tl_tpgt; |
567 | } | ||
568 | |||
569 | static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg) | ||
570 | { | ||
571 | return 1; | ||
572 | } | ||
573 | |||
574 | static u32 tcm_loop_get_pr_transport_id( | ||
575 | struct se_portal_group *se_tpg, | ||
576 | struct se_node_acl *se_nacl, | ||
577 | struct t10_pr_registration *pr_reg, | ||
578 | int *format_code, | ||
579 | unsigned char *buf) | ||
580 | { | ||
581 | struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; | ||
582 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
583 | |||
584 | switch (tl_hba->tl_proto_id) { | ||
585 | case SCSI_PROTOCOL_SAS: | ||
586 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
587 | format_code, buf); | ||
588 | case SCSI_PROTOCOL_FCP: | ||
589 | return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
590 | format_code, buf); | ||
591 | case SCSI_PROTOCOL_ISCSI: | ||
592 | return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
593 | format_code, buf); | ||
594 | default: | ||
595 | pr_err("Unknown tl_proto_id: 0x%02x, using" | ||
596 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
597 | break; | ||
598 | } | ||
599 | |||
600 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
601 | format_code, buf); | ||
602 | } | ||
603 | |||
604 | static u32 tcm_loop_get_pr_transport_id_len( | ||
605 | struct se_portal_group *se_tpg, | ||
606 | struct se_node_acl *se_nacl, | ||
607 | struct t10_pr_registration *pr_reg, | ||
608 | int *format_code) | ||
609 | { | ||
610 | struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; | ||
611 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
612 | |||
613 | switch (tl_hba->tl_proto_id) { | ||
614 | case SCSI_PROTOCOL_SAS: | ||
615 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
616 | format_code); | ||
617 | case SCSI_PROTOCOL_FCP: | ||
618 | return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
619 | format_code); | ||
620 | case SCSI_PROTOCOL_ISCSI: | ||
621 | return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
622 | format_code); | ||
623 | default: | ||
624 | pr_err("Unknown tl_proto_id: 0x%02x, using" | ||
625 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
626 | break; | ||
627 | } | ||
628 | |||
629 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
630 | format_code); | ||
631 | } | ||
632 | |||
633 | /* | ||
634 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | ||
635 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | ||
636 | */ | ||
637 | static char *tcm_loop_parse_pr_out_transport_id( | ||
638 | struct se_portal_group *se_tpg, | ||
639 | const char *buf, | ||
640 | u32 *out_tid_len, | ||
641 | char **port_nexus_ptr) | ||
642 | { | ||
643 | struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; | ||
644 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
645 | |||
646 | switch (tl_hba->tl_proto_id) { | ||
647 | case SCSI_PROTOCOL_SAS: | ||
648 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
649 | port_nexus_ptr); | ||
650 | case SCSI_PROTOCOL_FCP: | ||
651 | return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
652 | port_nexus_ptr); | ||
653 | case SCSI_PROTOCOL_ISCSI: | ||
654 | return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
655 | port_nexus_ptr); | ||
656 | default: | ||
657 | pr_err("Unknown tl_proto_id: 0x%02x, using" | ||
658 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
659 | break; | ||
660 | } | ||
661 | |||
662 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
663 | port_nexus_ptr); | ||
664 | } | 541 | } |
665 | 542 | ||
666 | /* | 543 | /* |
@@ -703,30 +580,6 @@ static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) | |||
703 | return tl_tpg->tl_fabric_prot_type; | 580 | return tl_tpg->tl_fabric_prot_type; |
704 | } | 581 | } |
705 | 582 | ||
706 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( | ||
707 | struct se_portal_group *se_tpg) | ||
708 | { | ||
709 | struct tcm_loop_nacl *tl_nacl; | ||
710 | |||
711 | tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); | ||
712 | if (!tl_nacl) { | ||
713 | pr_err("Unable to allocate struct tcm_loop_nacl\n"); | ||
714 | return NULL; | ||
715 | } | ||
716 | |||
717 | return &tl_nacl->se_node_acl; | ||
718 | } | ||
719 | |||
720 | static void tcm_loop_tpg_release_fabric_acl( | ||
721 | struct se_portal_group *se_tpg, | ||
722 | struct se_node_acl *se_nacl) | ||
723 | { | ||
724 | struct tcm_loop_nacl *tl_nacl = container_of(se_nacl, | ||
725 | struct tcm_loop_nacl, se_node_acl); | ||
726 | |||
727 | kfree(tl_nacl); | ||
728 | } | ||
729 | |||
730 | static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) | 583 | static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) |
731 | { | 584 | { |
732 | return 1; | 585 | return 1; |
@@ -742,14 +595,6 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) | |||
742 | return; | 595 | return; |
743 | } | 596 | } |
744 | 597 | ||
745 | static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) | ||
746 | { | ||
747 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
748 | struct tcm_loop_cmd, tl_se_cmd); | ||
749 | |||
750 | return tl_cmd->sc_cmd_tag; | ||
751 | } | ||
752 | |||
753 | static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) | 598 | static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) |
754 | { | 599 | { |
755 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | 600 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, |
@@ -902,7 +747,7 @@ static void tcm_loop_port_unlink( | |||
902 | se_lun->unpacked_lun); | 747 | se_lun->unpacked_lun); |
903 | if (!sd) { | 748 | if (!sd) { |
904 | pr_err("Unable to locate struct scsi_device for %d:%d:" | 749 | pr_err("Unable to locate struct scsi_device for %d:%d:" |
905 | "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); | 750 | "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); |
906 | return; | 751 | return; |
907 | } | 752 | } |
908 | /* | 753 | /* |
@@ -1234,8 +1079,7 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( | |||
1234 | /* | 1079 | /* |
1235 | * Register the tl_tpg as a emulated SAS TCM Target Endpoint | 1080 | * Register the tl_tpg as a emulated SAS TCM Target Endpoint |
1236 | */ | 1081 | */ |
1237 | ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg, | 1082 | ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id); |
1238 | TRANSPORT_TPG_TYPE_NORMAL); | ||
1239 | if (ret < 0) | 1083 | if (ret < 0) |
1240 | return ERR_PTR(-ENOMEM); | 1084 | return ERR_PTR(-ENOMEM); |
1241 | 1085 | ||
@@ -1386,13 +1230,8 @@ static const struct target_core_fabric_ops loop_ops = { | |||
1386 | .module = THIS_MODULE, | 1230 | .module = THIS_MODULE, |
1387 | .name = "loopback", | 1231 | .name = "loopback", |
1388 | .get_fabric_name = tcm_loop_get_fabric_name, | 1232 | .get_fabric_name = tcm_loop_get_fabric_name, |
1389 | .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident, | ||
1390 | .tpg_get_wwn = tcm_loop_get_endpoint_wwn, | 1233 | .tpg_get_wwn = tcm_loop_get_endpoint_wwn, |
1391 | .tpg_get_tag = tcm_loop_get_tag, | 1234 | .tpg_get_tag = tcm_loop_get_tag, |
1392 | .tpg_get_default_depth = tcm_loop_get_default_depth, | ||
1393 | .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id, | ||
1394 | .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len, | ||
1395 | .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id, | ||
1396 | .tpg_check_demo_mode = tcm_loop_check_demo_mode, | 1235 | .tpg_check_demo_mode = tcm_loop_check_demo_mode, |
1397 | .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, | 1236 | .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, |
1398 | .tpg_check_demo_mode_write_protect = | 1237 | .tpg_check_demo_mode_write_protect = |
@@ -1400,8 +1239,6 @@ static const struct target_core_fabric_ops loop_ops = { | |||
1400 | .tpg_check_prod_mode_write_protect = | 1239 | .tpg_check_prod_mode_write_protect = |
1401 | tcm_loop_check_prod_mode_write_protect, | 1240 | tcm_loop_check_prod_mode_write_protect, |
1402 | .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, | 1241 | .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, |
1403 | .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl, | ||
1404 | .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl, | ||
1405 | .tpg_get_inst_index = tcm_loop_get_inst_index, | 1242 | .tpg_get_inst_index = tcm_loop_get_inst_index, |
1406 | .check_stop_free = tcm_loop_check_stop_free, | 1243 | .check_stop_free = tcm_loop_check_stop_free, |
1407 | .release_cmd = tcm_loop_release_cmd, | 1244 | .release_cmd = tcm_loop_release_cmd, |
@@ -1411,7 +1248,6 @@ static const struct target_core_fabric_ops loop_ops = { | |||
1411 | .write_pending = tcm_loop_write_pending, | 1248 | .write_pending = tcm_loop_write_pending, |
1412 | .write_pending_status = tcm_loop_write_pending_status, | 1249 | .write_pending_status = tcm_loop_write_pending_status, |
1413 | .set_default_node_attributes = tcm_loop_set_default_node_attributes, | 1250 | .set_default_node_attributes = tcm_loop_set_default_node_attributes, |
1414 | .get_task_tag = tcm_loop_get_task_tag, | ||
1415 | .get_cmd_state = tcm_loop_get_cmd_state, | 1251 | .get_cmd_state = tcm_loop_get_cmd_state, |
1416 | .queue_data_in = tcm_loop_queue_data_in, | 1252 | .queue_data_in = tcm_loop_queue_data_in, |
1417 | .queue_status = tcm_loop_queue_status, | 1253 | .queue_status = tcm_loop_queue_status, |
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 1e72ff77cac9..4346462094a1 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -2,11 +2,6 @@ | |||
2 | #define TL_WWN_ADDR_LEN 256 | 2 | #define TL_WWN_ADDR_LEN 256 |
3 | #define TL_TPGS_PER_HBA 32 | 3 | #define TL_TPGS_PER_HBA 32 |
4 | 4 | ||
5 | /* | ||
6 | * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len | ||
7 | */ | ||
8 | #define TL_SCSI_MAX_CMD_LEN 32 | ||
9 | |||
10 | struct tcm_loop_cmd { | 5 | struct tcm_loop_cmd { |
11 | /* State of Linux/SCSI CDB+Data descriptor */ | 6 | /* State of Linux/SCSI CDB+Data descriptor */ |
12 | u32 sc_cmd_state; | 7 | u32 sc_cmd_state; |
@@ -33,10 +28,6 @@ struct tcm_loop_nexus { | |||
33 | struct se_session *se_sess; | 28 | struct se_session *se_sess; |
34 | }; | 29 | }; |
35 | 30 | ||
36 | struct tcm_loop_nacl { | ||
37 | struct se_node_acl se_node_acl; | ||
38 | }; | ||
39 | |||
40 | #define TCM_TRANSPORT_ONLINE 0 | 31 | #define TCM_TRANSPORT_ONLINE 0 |
41 | #define TCM_TRANSPORT_OFFLINE 1 | 32 | #define TCM_TRANSPORT_OFFLINE 1 |
42 | 33 | ||
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index ce81f17ad1ba..0edf320fb685 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <target/target_core_backend.h> | 36 | #include <target/target_core_backend.h> |
37 | #include <target/target_core_fabric.h> | 37 | #include <target/target_core_fabric.h> |
38 | #include <target/target_core_fabric_configfs.h> | 38 | #include <target/target_core_fabric_configfs.h> |
39 | #include <target/target_core_configfs.h> | ||
40 | #include <target/configfs_macros.h> | 39 | #include <target/configfs_macros.h> |
41 | #include <asm/unaligned.h> | 40 | #include <asm/unaligned.h> |
42 | 41 | ||
@@ -109,13 +108,13 @@ static struct sbp_session *sbp_session_find_by_guid( | |||
109 | } | 108 | } |
110 | 109 | ||
111 | static struct sbp_login_descriptor *sbp_login_find_by_lun( | 110 | static struct sbp_login_descriptor *sbp_login_find_by_lun( |
112 | struct sbp_session *session, struct se_lun *lun) | 111 | struct sbp_session *session, u32 unpacked_lun) |
113 | { | 112 | { |
114 | struct sbp_login_descriptor *login, *found = NULL; | 113 | struct sbp_login_descriptor *login, *found = NULL; |
115 | 114 | ||
116 | spin_lock_bh(&session->lock); | 115 | spin_lock_bh(&session->lock); |
117 | list_for_each_entry(login, &session->login_list, link) { | 116 | list_for_each_entry(login, &session->login_list, link) { |
118 | if (login->lun == lun) | 117 | if (login->login_lun == unpacked_lun) |
119 | found = login; | 118 | found = login; |
120 | } | 119 | } |
121 | spin_unlock_bh(&session->lock); | 120 | spin_unlock_bh(&session->lock); |
@@ -125,7 +124,7 @@ static struct sbp_login_descriptor *sbp_login_find_by_lun( | |||
125 | 124 | ||
126 | static int sbp_login_count_all_by_lun( | 125 | static int sbp_login_count_all_by_lun( |
127 | struct sbp_tpg *tpg, | 126 | struct sbp_tpg *tpg, |
128 | struct se_lun *lun, | 127 | u32 unpacked_lun, |
129 | int exclusive) | 128 | int exclusive) |
130 | { | 129 | { |
131 | struct se_session *se_sess; | 130 | struct se_session *se_sess; |
@@ -139,7 +138,7 @@ static int sbp_login_count_all_by_lun( | |||
139 | 138 | ||
140 | spin_lock_bh(&sess->lock); | 139 | spin_lock_bh(&sess->lock); |
141 | list_for_each_entry(login, &sess->login_list, link) { | 140 | list_for_each_entry(login, &sess->login_list, link) { |
142 | if (login->lun != lun) | 141 | if (login->login_lun != unpacked_lun) |
143 | continue; | 142 | continue; |
144 | 143 | ||
145 | if (!exclusive || login->exclusive) | 144 | if (!exclusive || login->exclusive) |
@@ -175,23 +174,23 @@ static struct sbp_login_descriptor *sbp_login_find_by_id( | |||
175 | return found; | 174 | return found; |
176 | } | 175 | } |
177 | 176 | ||
178 | static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun) | 177 | static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err) |
179 | { | 178 | { |
180 | struct se_portal_group *se_tpg = &tpg->se_tpg; | 179 | struct se_portal_group *se_tpg = &tpg->se_tpg; |
181 | struct se_lun *se_lun; | 180 | struct se_lun *se_lun; |
182 | 181 | ||
183 | if (lun >= TRANSPORT_MAX_LUNS_PER_TPG) | 182 | rcu_read_lock(); |
184 | return ERR_PTR(-EINVAL); | 183 | hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) { |
185 | 184 | if (se_lun->unpacked_lun == login_lun) { | |
186 | spin_lock(&se_tpg->tpg_lun_lock); | 185 | rcu_read_unlock(); |
187 | se_lun = se_tpg->tpg_lun_list[lun]; | 186 | *err = 0; |
188 | 187 | return login_lun; | |
189 | if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) | 188 | } |
190 | se_lun = ERR_PTR(-ENODEV); | 189 | } |
191 | 190 | rcu_read_unlock(); | |
192 | spin_unlock(&se_tpg->tpg_lun_lock); | ||
193 | 191 | ||
194 | return se_lun; | 192 | *err = -ENODEV; |
193 | return login_lun; | ||
195 | } | 194 | } |
196 | 195 | ||
197 | static struct sbp_session *sbp_session_create( | 196 | static struct sbp_session *sbp_session_create( |
@@ -295,17 +294,16 @@ static void sbp_management_request_login( | |||
295 | { | 294 | { |
296 | struct sbp_tport *tport = agent->tport; | 295 | struct sbp_tport *tport = agent->tport; |
297 | struct sbp_tpg *tpg = tport->tpg; | 296 | struct sbp_tpg *tpg = tport->tpg; |
298 | struct se_lun *se_lun; | ||
299 | int ret; | ||
300 | u64 guid; | ||
301 | struct sbp_session *sess; | 297 | struct sbp_session *sess; |
302 | struct sbp_login_descriptor *login; | 298 | struct sbp_login_descriptor *login; |
303 | struct sbp_login_response_block *response; | 299 | struct sbp_login_response_block *response; |
304 | int login_response_len; | 300 | u64 guid; |
301 | u32 unpacked_lun; | ||
302 | int login_response_len, ret; | ||
305 | 303 | ||
306 | se_lun = sbp_get_lun_from_tpg(tpg, | 304 | unpacked_lun = sbp_get_lun_from_tpg(tpg, |
307 | LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); | 305 | LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret); |
308 | if (IS_ERR(se_lun)) { | 306 | if (ret) { |
309 | pr_notice("login to unknown LUN: %d\n", | 307 | pr_notice("login to unknown LUN: %d\n", |
310 | LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); | 308 | LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); |
311 | 309 | ||
@@ -326,11 +324,11 @@ static void sbp_management_request_login( | |||
326 | } | 324 | } |
327 | 325 | ||
328 | pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", | 326 | pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", |
329 | se_lun->unpacked_lun, guid); | 327 | unpacked_lun, guid); |
330 | 328 | ||
331 | sess = sbp_session_find_by_guid(tpg, guid); | 329 | sess = sbp_session_find_by_guid(tpg, guid); |
332 | if (sess) { | 330 | if (sess) { |
333 | login = sbp_login_find_by_lun(sess, se_lun); | 331 | login = sbp_login_find_by_lun(sess, unpacked_lun); |
334 | if (login) { | 332 | if (login) { |
335 | pr_notice("initiator already logged-in\n"); | 333 | pr_notice("initiator already logged-in\n"); |
336 | 334 | ||
@@ -358,7 +356,7 @@ static void sbp_management_request_login( | |||
358 | * reject with access_denied if any logins present | 356 | * reject with access_denied if any logins present |
359 | */ | 357 | */ |
360 | if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && | 358 | if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && |
361 | sbp_login_count_all_by_lun(tpg, se_lun, 0)) { | 359 | sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) { |
362 | pr_warn("refusing exclusive login with other active logins\n"); | 360 | pr_warn("refusing exclusive login with other active logins\n"); |
363 | 361 | ||
364 | req->status.status = cpu_to_be32( | 362 | req->status.status = cpu_to_be32( |
@@ -371,7 +369,7 @@ static void sbp_management_request_login( | |||
371 | * check exclusive bit in any existing login descriptor | 369 | * check exclusive bit in any existing login descriptor |
372 | * reject with access_denied if any exclusive logins present | 370 | * reject with access_denied if any exclusive logins present |
373 | */ | 371 | */ |
374 | if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) { | 372 | if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) { |
375 | pr_warn("refusing login while another exclusive login present\n"); | 373 | pr_warn("refusing login while another exclusive login present\n"); |
376 | 374 | ||
377 | req->status.status = cpu_to_be32( | 375 | req->status.status = cpu_to_be32( |
@@ -384,7 +382,7 @@ static void sbp_management_request_login( | |||
384 | * check we haven't exceeded the number of allowed logins | 382 | * check we haven't exceeded the number of allowed logins |
385 | * reject with resources_unavailable if we have | 383 | * reject with resources_unavailable if we have |
386 | */ | 384 | */ |
387 | if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >= | 385 | if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >= |
388 | tport->max_logins_per_lun) { | 386 | tport->max_logins_per_lun) { |
389 | pr_warn("max number of logins reached\n"); | 387 | pr_warn("max number of logins reached\n"); |
390 | 388 | ||
@@ -440,7 +438,7 @@ static void sbp_management_request_login( | |||
440 | } | 438 | } |
441 | 439 | ||
442 | login->sess = sess; | 440 | login->sess = sess; |
443 | login->lun = se_lun; | 441 | login->login_lun = unpacked_lun; |
444 | login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); | 442 | login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); |
445 | login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); | 443 | login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); |
446 | login->login_id = atomic_inc_return(&login_id); | 444 | login->login_id = atomic_inc_return(&login_id); |
@@ -602,7 +600,7 @@ static void sbp_management_request_logout( | |||
602 | } | 600 | } |
603 | 601 | ||
604 | pr_info("mgt_agent LOGOUT from LUN %d session %d\n", | 602 | pr_info("mgt_agent LOGOUT from LUN %d session %d\n", |
605 | login->lun->unpacked_lun, login->login_id); | 603 | login->login_lun, login->login_id); |
606 | 604 | ||
607 | if (req->node_addr != login->sess->node_id) { | 605 | if (req->node_addr != login->sess->node_id) { |
608 | pr_warn("logout from different node ID\n"); | 606 | pr_warn("logout from different node ID\n"); |
@@ -1228,12 +1226,14 @@ static void sbp_handle_command(struct sbp_target_request *req) | |||
1228 | goto err; | 1226 | goto err; |
1229 | } | 1227 | } |
1230 | 1228 | ||
1231 | unpacked_lun = req->login->lun->unpacked_lun; | 1229 | unpacked_lun = req->login->login_lun; |
1232 | sbp_calc_data_length_direction(req, &data_length, &data_dir); | 1230 | sbp_calc_data_length_direction(req, &data_length, &data_dir); |
1233 | 1231 | ||
1234 | pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", | 1232 | pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", |
1235 | req->orb_pointer, unpacked_lun, data_length, data_dir); | 1233 | req->orb_pointer, unpacked_lun, data_length, data_dir); |
1236 | 1234 | ||
1235 | /* only used for printk until we do TMRs */ | ||
1236 | req->se_cmd.tag = req->orb_pointer; | ||
1237 | if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, | 1237 | if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, |
1238 | req->sense_buf, unpacked_lun, data_length, | 1238 | req->sense_buf, unpacked_lun, data_length, |
1239 | TCM_SIMPLE_TAG, data_dir, 0)) | 1239 | TCM_SIMPLE_TAG, data_dir, 0)) |
@@ -1707,33 +1707,6 @@ static u16 sbp_get_tag(struct se_portal_group *se_tpg) | |||
1707 | return tpg->tport_tpgt; | 1707 | return tpg->tport_tpgt; |
1708 | } | 1708 | } |
1709 | 1709 | ||
1710 | static u32 sbp_get_default_depth(struct se_portal_group *se_tpg) | ||
1711 | { | ||
1712 | return 1; | ||
1713 | } | ||
1714 | |||
1715 | static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg) | ||
1716 | { | ||
1717 | struct sbp_nacl *nacl; | ||
1718 | |||
1719 | nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL); | ||
1720 | if (!nacl) { | ||
1721 | pr_err("Unable to allocate struct sbp_nacl\n"); | ||
1722 | return NULL; | ||
1723 | } | ||
1724 | |||
1725 | return &nacl->se_node_acl; | ||
1726 | } | ||
1727 | |||
1728 | static void sbp_release_fabric_acl( | ||
1729 | struct se_portal_group *se_tpg, | ||
1730 | struct se_node_acl *se_nacl) | ||
1731 | { | ||
1732 | struct sbp_nacl *nacl = | ||
1733 | container_of(se_nacl, struct sbp_nacl, se_node_acl); | ||
1734 | kfree(nacl); | ||
1735 | } | ||
1736 | |||
1737 | static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg) | 1710 | static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg) |
1738 | { | 1711 | { |
1739 | return 1; | 1712 | return 1; |
@@ -1795,15 +1768,6 @@ static void sbp_set_default_node_attrs(struct se_node_acl *nacl) | |||
1795 | return; | 1768 | return; |
1796 | } | 1769 | } |
1797 | 1770 | ||
1798 | static u32 sbp_get_task_tag(struct se_cmd *se_cmd) | ||
1799 | { | ||
1800 | struct sbp_target_request *req = container_of(se_cmd, | ||
1801 | struct sbp_target_request, se_cmd); | ||
1802 | |||
1803 | /* only used for printk until we do TMRs */ | ||
1804 | return (u32)req->orb_pointer; | ||
1805 | } | ||
1806 | |||
1807 | static int sbp_get_cmd_state(struct se_cmd *se_cmd) | 1771 | static int sbp_get_cmd_state(struct se_cmd *se_cmd) |
1808 | { | 1772 | { |
1809 | return 0; | 1773 | return 0; |
@@ -1859,106 +1823,23 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd) | |||
1859 | return 1; | 1823 | return 1; |
1860 | } | 1824 | } |
1861 | 1825 | ||
1862 | /* | ||
1863 | * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3) | ||
1864 | */ | ||
1865 | static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
1866 | { | ||
1867 | /* | ||
1868 | * Return a IEEE 1394 SCSI Protocol identifier for loopback operations | ||
1869 | * This is defined in section 7.5.1 Table 362 in spc4r17 | ||
1870 | */ | ||
1871 | return SCSI_PROTOCOL_SBP; | ||
1872 | } | ||
1873 | |||
1874 | static u32 sbp_get_pr_transport_id( | ||
1875 | struct se_portal_group *se_tpg, | ||
1876 | struct se_node_acl *se_nacl, | ||
1877 | struct t10_pr_registration *pr_reg, | ||
1878 | int *format_code, | ||
1879 | unsigned char *buf) | ||
1880 | { | ||
1881 | int ret; | ||
1882 | |||
1883 | /* | ||
1884 | * Set PROTOCOL IDENTIFIER to 3h for SBP | ||
1885 | */ | ||
1886 | buf[0] = SCSI_PROTOCOL_SBP; | ||
1887 | /* | ||
1888 | * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI | ||
1889 | * over IEEE 1394 | ||
1890 | */ | ||
1891 | ret = hex2bin(&buf[8], se_nacl->initiatorname, 8); | ||
1892 | if (ret < 0) | ||
1893 | pr_debug("sbp transport_id: invalid hex string\n"); | ||
1894 | |||
1895 | /* | ||
1896 | * The IEEE 1394 Transport ID is a hardcoded 24-byte length | ||
1897 | */ | ||
1898 | return 24; | ||
1899 | } | ||
1900 | |||
1901 | static u32 sbp_get_pr_transport_id_len( | ||
1902 | struct se_portal_group *se_tpg, | ||
1903 | struct se_node_acl *se_nacl, | ||
1904 | struct t10_pr_registration *pr_reg, | ||
1905 | int *format_code) | ||
1906 | { | ||
1907 | *format_code = 0; | ||
1908 | /* | ||
1909 | * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI | ||
1910 | * over IEEE 1394 | ||
1911 | * | ||
1912 | * The SBP Transport ID is a hardcoded 24-byte length | ||
1913 | */ | ||
1914 | return 24; | ||
1915 | } | ||
1916 | |||
1917 | /* | ||
1918 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | ||
1919 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | ||
1920 | */ | ||
1921 | static char *sbp_parse_pr_out_transport_id( | ||
1922 | struct se_portal_group *se_tpg, | ||
1923 | const char *buf, | ||
1924 | u32 *out_tid_len, | ||
1925 | char **port_nexus_ptr) | ||
1926 | { | ||
1927 | /* | ||
1928 | * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID | ||
1929 | * for initiator ports using SCSI over SBP Serial SCSI Protocol | ||
1930 | * | ||
1931 | * The TransportID for a IEEE 1394 Initiator Port is of fixed size of | ||
1932 | * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier, | ||
1933 | * so we return the **port_nexus_ptr set to NULL. | ||
1934 | */ | ||
1935 | *port_nexus_ptr = NULL; | ||
1936 | *out_tid_len = 24; | ||
1937 | |||
1938 | return (char *)&buf[8]; | ||
1939 | } | ||
1940 | |||
1941 | static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) | 1826 | static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) |
1942 | { | 1827 | { |
1943 | int i, count = 0; | 1828 | struct se_lun *lun; |
1944 | 1829 | int count = 0; | |
1945 | spin_lock(&tpg->tpg_lun_lock); | ||
1946 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
1947 | struct se_lun *se_lun = tpg->tpg_lun_list[i]; | ||
1948 | |||
1949 | if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE) | ||
1950 | continue; | ||
1951 | 1830 | ||
1831 | rcu_read_lock(); | ||
1832 | hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) | ||
1952 | count++; | 1833 | count++; |
1953 | } | 1834 | rcu_read_unlock(); |
1954 | spin_unlock(&tpg->tpg_lun_lock); | ||
1955 | 1835 | ||
1956 | return count; | 1836 | return count; |
1957 | } | 1837 | } |
1958 | 1838 | ||
1959 | static int sbp_update_unit_directory(struct sbp_tport *tport) | 1839 | static int sbp_update_unit_directory(struct sbp_tport *tport) |
1960 | { | 1840 | { |
1961 | int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i; | 1841 | struct se_lun *lun; |
1842 | int num_luns, num_entries, idx = 0, mgt_agt_addr, ret; | ||
1962 | u32 *data; | 1843 | u32 *data; |
1963 | 1844 | ||
1964 | if (tport->unit_directory.data) { | 1845 | if (tport->unit_directory.data) { |
@@ -2020,28 +1901,23 @@ static int sbp_update_unit_directory(struct sbp_tport *tport) | |||
2020 | /* unit unique ID (leaf is just after LUNs) */ | 1901 | /* unit unique ID (leaf is just after LUNs) */ |
2021 | data[idx++] = 0x8d000000 | (num_luns + 1); | 1902 | data[idx++] = 0x8d000000 | (num_luns + 1); |
2022 | 1903 | ||
2023 | spin_lock(&tport->tpg->se_tpg.tpg_lun_lock); | 1904 | rcu_read_lock(); |
2024 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 1905 | hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) { |
2025 | struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i]; | ||
2026 | struct se_device *dev; | 1906 | struct se_device *dev; |
2027 | int type; | 1907 | int type; |
2028 | 1908 | /* | |
2029 | if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE) | 1909 | * rcu_dereference_raw protected by se_lun->lun_group symlink |
2030 | continue; | 1910 | * reference to se_device->dev_group. |
2031 | 1911 | */ | |
2032 | spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock); | 1912 | dev = rcu_dereference_raw(lun->lun_se_dev); |
2033 | |||
2034 | dev = se_lun->lun_se_dev; | ||
2035 | type = dev->transport->get_device_type(dev); | 1913 | type = dev->transport->get_device_type(dev); |
2036 | 1914 | ||
2037 | /* logical_unit_number */ | 1915 | /* logical_unit_number */ |
2038 | data[idx++] = 0x14000000 | | 1916 | data[idx++] = 0x14000000 | |
2039 | ((type << 16) & 0x1f0000) | | 1917 | ((type << 16) & 0x1f0000) | |
2040 | (se_lun->unpacked_lun & 0xffff); | 1918 | (lun->unpacked_lun & 0xffff); |
2041 | |||
2042 | spin_lock(&tport->tpg->se_tpg.tpg_lun_lock); | ||
2043 | } | 1919 | } |
2044 | spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock); | 1920 | rcu_read_unlock(); |
2045 | 1921 | ||
2046 | /* unit unique ID leaf */ | 1922 | /* unit unique ID leaf */ |
2047 | data[idx++] = 2 << 16; | 1923 | data[idx++] = 2 << 16; |
@@ -2100,48 +1976,13 @@ static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn) | |||
2100 | return snprintf(buf, len, "%016llx", wwn); | 1976 | return snprintf(buf, len, "%016llx", wwn); |
2101 | } | 1977 | } |
2102 | 1978 | ||
2103 | static struct se_node_acl *sbp_make_nodeacl( | 1979 | static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name) |
2104 | struct se_portal_group *se_tpg, | ||
2105 | struct config_group *group, | ||
2106 | const char *name) | ||
2107 | { | 1980 | { |
2108 | struct se_node_acl *se_nacl, *se_nacl_new; | ||
2109 | struct sbp_nacl *nacl; | ||
2110 | u64 guid = 0; | 1981 | u64 guid = 0; |
2111 | u32 nexus_depth = 1; | ||
2112 | 1982 | ||
2113 | if (sbp_parse_wwn(name, &guid) < 0) | 1983 | if (sbp_parse_wwn(name, &guid) < 0) |
2114 | return ERR_PTR(-EINVAL); | 1984 | return -EINVAL; |
2115 | 1985 | return 0; | |
2116 | se_nacl_new = sbp_alloc_fabric_acl(se_tpg); | ||
2117 | if (!se_nacl_new) | ||
2118 | return ERR_PTR(-ENOMEM); | ||
2119 | |||
2120 | /* | ||
2121 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
2122 | * when converting a NodeACL from demo mode -> explict | ||
2123 | */ | ||
2124 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | ||
2125 | name, nexus_depth); | ||
2126 | if (IS_ERR(se_nacl)) { | ||
2127 | sbp_release_fabric_acl(se_tpg, se_nacl_new); | ||
2128 | return se_nacl; | ||
2129 | } | ||
2130 | |||
2131 | nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl); | ||
2132 | nacl->guid = guid; | ||
2133 | sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid); | ||
2134 | |||
2135 | return se_nacl; | ||
2136 | } | ||
2137 | |||
2138 | static void sbp_drop_nodeacl(struct se_node_acl *se_acl) | ||
2139 | { | ||
2140 | struct sbp_nacl *nacl = | ||
2141 | container_of(se_acl, struct sbp_nacl, se_node_acl); | ||
2142 | |||
2143 | core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); | ||
2144 | kfree(nacl); | ||
2145 | } | 1986 | } |
2146 | 1987 | ||
2147 | static int sbp_post_link_lun( | 1988 | static int sbp_post_link_lun( |
@@ -2214,8 +2055,7 @@ static struct se_portal_group *sbp_make_tpg( | |||
2214 | goto out_free_tpg; | 2055 | goto out_free_tpg; |
2215 | } | 2056 | } |
2216 | 2057 | ||
2217 | ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg, | 2058 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP); |
2218 | TRANSPORT_TPG_TYPE_NORMAL); | ||
2219 | if (ret < 0) | 2059 | if (ret < 0) |
2220 | goto out_unreg_mgt_agt; | 2060 | goto out_unreg_mgt_agt; |
2221 | 2061 | ||
@@ -2505,19 +2345,12 @@ static const struct target_core_fabric_ops sbp_ops = { | |||
2505 | .module = THIS_MODULE, | 2345 | .module = THIS_MODULE, |
2506 | .name = "sbp", | 2346 | .name = "sbp", |
2507 | .get_fabric_name = sbp_get_fabric_name, | 2347 | .get_fabric_name = sbp_get_fabric_name, |
2508 | .get_fabric_proto_ident = sbp_get_fabric_proto_ident, | ||
2509 | .tpg_get_wwn = sbp_get_fabric_wwn, | 2348 | .tpg_get_wwn = sbp_get_fabric_wwn, |
2510 | .tpg_get_tag = sbp_get_tag, | 2349 | .tpg_get_tag = sbp_get_tag, |
2511 | .tpg_get_default_depth = sbp_get_default_depth, | ||
2512 | .tpg_get_pr_transport_id = sbp_get_pr_transport_id, | ||
2513 | .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len, | ||
2514 | .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id, | ||
2515 | .tpg_check_demo_mode = sbp_check_true, | 2350 | .tpg_check_demo_mode = sbp_check_true, |
2516 | .tpg_check_demo_mode_cache = sbp_check_true, | 2351 | .tpg_check_demo_mode_cache = sbp_check_true, |
2517 | .tpg_check_demo_mode_write_protect = sbp_check_false, | 2352 | .tpg_check_demo_mode_write_protect = sbp_check_false, |
2518 | .tpg_check_prod_mode_write_protect = sbp_check_false, | 2353 | .tpg_check_prod_mode_write_protect = sbp_check_false, |
2519 | .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl, | ||
2520 | .tpg_release_fabric_acl = sbp_release_fabric_acl, | ||
2521 | .tpg_get_inst_index = sbp_tpg_get_inst_index, | 2354 | .tpg_get_inst_index = sbp_tpg_get_inst_index, |
2522 | .release_cmd = sbp_release_cmd, | 2355 | .release_cmd = sbp_release_cmd, |
2523 | .shutdown_session = sbp_shutdown_session, | 2356 | .shutdown_session = sbp_shutdown_session, |
@@ -2526,7 +2359,6 @@ static const struct target_core_fabric_ops sbp_ops = { | |||
2526 | .write_pending = sbp_write_pending, | 2359 | .write_pending = sbp_write_pending, |
2527 | .write_pending_status = sbp_write_pending_status, | 2360 | .write_pending_status = sbp_write_pending_status, |
2528 | .set_default_node_attributes = sbp_set_default_node_attrs, | 2361 | .set_default_node_attributes = sbp_set_default_node_attrs, |
2529 | .get_task_tag = sbp_get_task_tag, | ||
2530 | .get_cmd_state = sbp_get_cmd_state, | 2362 | .get_cmd_state = sbp_get_cmd_state, |
2531 | .queue_data_in = sbp_queue_data_in, | 2363 | .queue_data_in = sbp_queue_data_in, |
2532 | .queue_status = sbp_queue_status, | 2364 | .queue_status = sbp_queue_status, |
@@ -2542,8 +2374,7 @@ static const struct target_core_fabric_ops sbp_ops = { | |||
2542 | .fabric_pre_unlink = sbp_pre_unlink_lun, | 2374 | .fabric_pre_unlink = sbp_pre_unlink_lun, |
2543 | .fabric_make_np = NULL, | 2375 | .fabric_make_np = NULL, |
2544 | .fabric_drop_np = NULL, | 2376 | .fabric_drop_np = NULL, |
2545 | .fabric_make_nodeacl = sbp_make_nodeacl, | 2377 | .fabric_init_nodeacl = sbp_init_nodeacl, |
2546 | .fabric_drop_nodeacl = sbp_drop_nodeacl, | ||
2547 | 2378 | ||
2548 | .tfc_wwn_attrs = sbp_wwn_attrs, | 2379 | .tfc_wwn_attrs = sbp_wwn_attrs, |
2549 | .tfc_tpg_base_attrs = sbp_tpg_base_attrs, | 2380 | .tfc_tpg_base_attrs = sbp_tpg_base_attrs, |
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h index 6d0d74a2c545..73bcb1208832 100644 --- a/drivers/target/sbp/sbp_target.h +++ b/drivers/target/sbp/sbp_target.h | |||
@@ -125,7 +125,7 @@ struct sbp_login_descriptor { | |||
125 | struct sbp_session *sess; | 125 | struct sbp_session *sess; |
126 | struct list_head link; | 126 | struct list_head link; |
127 | 127 | ||
128 | struct se_lun *lun; | 128 | u32 login_lun; |
129 | 129 | ||
130 | u64 status_fifo_addr; | 130 | u64 status_fifo_addr; |
131 | int exclusive; | 131 | int exclusive; |
@@ -151,15 +151,6 @@ struct sbp_session { | |||
151 | u64 reconnect_expires; | 151 | u64 reconnect_expires; |
152 | }; | 152 | }; |
153 | 153 | ||
154 | struct sbp_nacl { | ||
155 | /* Initiator EUI-64 */ | ||
156 | u64 guid; | ||
157 | /* ASCII formatted GUID for SBP Initiator port */ | ||
158 | char iport_name[SBP_NAMELEN]; | ||
159 | /* Returned by sbp_make_nodeacl() */ | ||
160 | struct se_node_acl se_node_acl; | ||
161 | }; | ||
162 | |||
163 | struct sbp_tpg { | 154 | struct sbp_tpg { |
164 | /* Target portal group tag for TCM */ | 155 | /* Target portal group tag for TCM */ |
165 | u16 tport_tpgt; | 156 | u16 tport_tpgt; |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 8ca373774276..49aba4a31747 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <target/target_core_base.h> | 34 | #include <target/target_core_base.h> |
35 | #include <target/target_core_backend.h> | 35 | #include <target/target_core_backend.h> |
36 | #include <target/target_core_fabric.h> | 36 | #include <target/target_core_fabric.h> |
37 | #include <target/target_core_configfs.h> | ||
38 | 37 | ||
39 | #include "target_core_internal.h" | 38 | #include "target_core_internal.h" |
40 | #include "target_core_alua.h" | 39 | #include "target_core_alua.h" |
@@ -43,11 +42,13 @@ | |||
43 | static sense_reason_t core_alua_check_transition(int state, int valid, | 42 | static sense_reason_t core_alua_check_transition(int state, int valid, |
44 | int *primary); | 43 | int *primary); |
45 | static int core_alua_set_tg_pt_secondary_state( | 44 | static int core_alua_set_tg_pt_secondary_state( |
46 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | 45 | struct se_lun *lun, int explicit, int offline); |
47 | struct se_port *port, int explicit, int offline); | ||
48 | 46 | ||
49 | static char *core_alua_dump_state(int state); | 47 | static char *core_alua_dump_state(int state); |
50 | 48 | ||
49 | static void __target_attach_tg_pt_gp(struct se_lun *lun, | ||
50 | struct t10_alua_tg_pt_gp *tg_pt_gp); | ||
51 | |||
51 | static u16 alua_lu_gps_counter; | 52 | static u16 alua_lu_gps_counter; |
52 | static u32 alua_lu_gps_count; | 53 | static u32 alua_lu_gps_count; |
53 | 54 | ||
@@ -145,9 +146,8 @@ sense_reason_t | |||
145 | target_emulate_report_target_port_groups(struct se_cmd *cmd) | 146 | target_emulate_report_target_port_groups(struct se_cmd *cmd) |
146 | { | 147 | { |
147 | struct se_device *dev = cmd->se_dev; | 148 | struct se_device *dev = cmd->se_dev; |
148 | struct se_port *port; | ||
149 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 149 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
150 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | 150 | struct se_lun *lun; |
151 | unsigned char *buf; | 151 | unsigned char *buf; |
152 | u32 rd_len = 0, off; | 152 | u32 rd_len = 0, off; |
153 | int ext_hdr = (cmd->t_task_cdb[1] & 0x20); | 153 | int ext_hdr = (cmd->t_task_cdb[1] & 0x20); |
@@ -222,9 +222,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) | |||
222 | rd_len += 8; | 222 | rd_len += 8; |
223 | 223 | ||
224 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 224 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
225 | list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, | 225 | list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, |
226 | tg_pt_gp_mem_list) { | 226 | lun_tg_pt_gp_link) { |
227 | port = tg_pt_gp_mem->tg_pt; | ||
228 | /* | 227 | /* |
229 | * Start Target Port descriptor format | 228 | * Start Target Port descriptor format |
230 | * | 229 | * |
@@ -234,8 +233,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) | |||
234 | /* | 233 | /* |
235 | * Set RELATIVE TARGET PORT IDENTIFIER | 234 | * Set RELATIVE TARGET PORT IDENTIFIER |
236 | */ | 235 | */ |
237 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | 236 | buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); |
238 | buf[off++] = (port->sep_rtpi & 0xff); | 237 | buf[off++] = (lun->lun_rtpi & 0xff); |
239 | rd_len += 4; | 238 | rd_len += 4; |
240 | } | 239 | } |
241 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | 240 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); |
@@ -259,15 +258,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) | |||
259 | * this CDB was received upon to determine this value individually | 258 | * this CDB was received upon to determine this value individually |
260 | * for ALUA target port group. | 259 | * for ALUA target port group. |
261 | */ | 260 | */ |
262 | port = cmd->se_lun->lun_sep; | 261 | spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock); |
263 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 262 | tg_pt_gp = cmd->se_lun->lun_tg_pt_gp; |
264 | if (tg_pt_gp_mem) { | 263 | if (tg_pt_gp) |
265 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 264 | buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; |
266 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 265 | spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock); |
267 | if (tg_pt_gp) | ||
268 | buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; | ||
269 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
270 | } | ||
271 | } | 266 | } |
272 | transport_kunmap_data_sg(cmd); | 267 | transport_kunmap_data_sg(cmd); |
273 | 268 | ||
@@ -284,10 +279,9 @@ sense_reason_t | |||
284 | target_emulate_set_target_port_groups(struct se_cmd *cmd) | 279 | target_emulate_set_target_port_groups(struct se_cmd *cmd) |
285 | { | 280 | { |
286 | struct se_device *dev = cmd->se_dev; | 281 | struct se_device *dev = cmd->se_dev; |
287 | struct se_port *port, *l_port = cmd->se_lun->lun_sep; | 282 | struct se_lun *l_lun = cmd->se_lun; |
288 | struct se_node_acl *nacl = cmd->se_sess->se_node_acl; | 283 | struct se_node_acl *nacl = cmd->se_sess->se_node_acl; |
289 | struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; | 284 | struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; |
290 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; | ||
291 | unsigned char *buf; | 285 | unsigned char *buf; |
292 | unsigned char *ptr; | 286 | unsigned char *ptr; |
293 | sense_reason_t rc = TCM_NO_SENSE; | 287 | sense_reason_t rc = TCM_NO_SENSE; |
@@ -295,9 +289,6 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
295 | int alua_access_state, primary = 0, valid_states; | 289 | int alua_access_state, primary = 0, valid_states; |
296 | u16 tg_pt_id, rtpi; | 290 | u16 tg_pt_id, rtpi; |
297 | 291 | ||
298 | if (!l_port) | ||
299 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
300 | |||
301 | if (cmd->data_length < 4) { | 292 | if (cmd->data_length < 4) { |
302 | pr_warn("SET TARGET PORT GROUPS parameter list length %u too" | 293 | pr_warn("SET TARGET PORT GROUPS parameter list length %u too" |
303 | " small\n", cmd->data_length); | 294 | " small\n", cmd->data_length); |
@@ -312,29 +303,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
312 | * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed | 303 | * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed |
313 | * for the local tg_pt_gp. | 304 | * for the local tg_pt_gp. |
314 | */ | 305 | */ |
315 | l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; | 306 | spin_lock(&l_lun->lun_tg_pt_gp_lock); |
316 | if (!l_tg_pt_gp_mem) { | 307 | l_tg_pt_gp = l_lun->lun_tg_pt_gp; |
317 | pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); | ||
318 | rc = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
319 | goto out; | ||
320 | } | ||
321 | spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
322 | l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; | ||
323 | if (!l_tg_pt_gp) { | 308 | if (!l_tg_pt_gp) { |
324 | spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); | 309 | spin_unlock(&l_lun->lun_tg_pt_gp_lock); |
325 | pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); | 310 | pr_err("Unable to access l_lun->tg_pt_gp\n"); |
326 | rc = TCM_UNSUPPORTED_SCSI_OPCODE; | 311 | rc = TCM_UNSUPPORTED_SCSI_OPCODE; |
327 | goto out; | 312 | goto out; |
328 | } | 313 | } |
329 | spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
330 | 314 | ||
331 | if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { | 315 | if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { |
316 | spin_unlock(&l_lun->lun_tg_pt_gp_lock); | ||
332 | pr_debug("Unable to process SET_TARGET_PORT_GROUPS" | 317 | pr_debug("Unable to process SET_TARGET_PORT_GROUPS" |
333 | " while TPGS_EXPLICIT_ALUA is disabled\n"); | 318 | " while TPGS_EXPLICIT_ALUA is disabled\n"); |
334 | rc = TCM_UNSUPPORTED_SCSI_OPCODE; | 319 | rc = TCM_UNSUPPORTED_SCSI_OPCODE; |
335 | goto out; | 320 | goto out; |
336 | } | 321 | } |
337 | valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; | 322 | valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; |
323 | spin_unlock(&l_lun->lun_tg_pt_gp_lock); | ||
338 | 324 | ||
339 | ptr = &buf[4]; /* Skip over RESERVED area in header */ | 325 | ptr = &buf[4]; /* Skip over RESERVED area in header */ |
340 | 326 | ||
@@ -396,7 +382,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
396 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); | 382 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); |
397 | 383 | ||
398 | if (!core_alua_do_port_transition(tg_pt_gp, | 384 | if (!core_alua_do_port_transition(tg_pt_gp, |
399 | dev, l_port, nacl, | 385 | dev, l_lun, nacl, |
400 | alua_access_state, 1)) | 386 | alua_access_state, 1)) |
401 | found = true; | 387 | found = true; |
402 | 388 | ||
@@ -406,6 +392,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
406 | } | 392 | } |
407 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); | 393 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); |
408 | } else { | 394 | } else { |
395 | struct se_lun *lun; | ||
396 | |||
409 | /* | 397 | /* |
410 | * Extract the RELATIVE TARGET PORT IDENTIFIER to identify | 398 | * Extract the RELATIVE TARGET PORT IDENTIFIER to identify |
411 | * the Target Port in question for the the incoming | 399 | * the Target Port in question for the the incoming |
@@ -417,17 +405,16 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
417 | * for the struct se_device storage object. | 405 | * for the struct se_device storage object. |
418 | */ | 406 | */ |
419 | spin_lock(&dev->se_port_lock); | 407 | spin_lock(&dev->se_port_lock); |
420 | list_for_each_entry(port, &dev->dev_sep_list, | 408 | list_for_each_entry(lun, &dev->dev_sep_list, |
421 | sep_list) { | 409 | lun_dev_link) { |
422 | if (port->sep_rtpi != rtpi) | 410 | if (lun->lun_rtpi != rtpi) |
423 | continue; | 411 | continue; |
424 | 412 | ||
425 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 413 | // XXX: racy unlock |
426 | |||
427 | spin_unlock(&dev->se_port_lock); | 414 | spin_unlock(&dev->se_port_lock); |
428 | 415 | ||
429 | if (!core_alua_set_tg_pt_secondary_state( | 416 | if (!core_alua_set_tg_pt_secondary_state( |
430 | tg_pt_gp_mem, port, 1, 1)) | 417 | lun, 1, 1)) |
431 | found = true; | 418 | found = true; |
432 | 419 | ||
433 | spin_lock(&dev->se_port_lock); | 420 | spin_lock(&dev->se_port_lock); |
@@ -696,9 +683,7 @@ target_alua_state_check(struct se_cmd *cmd) | |||
696 | struct se_device *dev = cmd->se_dev; | 683 | struct se_device *dev = cmd->se_dev; |
697 | unsigned char *cdb = cmd->t_task_cdb; | 684 | unsigned char *cdb = cmd->t_task_cdb; |
698 | struct se_lun *lun = cmd->se_lun; | 685 | struct se_lun *lun = cmd->se_lun; |
699 | struct se_port *port = lun->lun_sep; | ||
700 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 686 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
701 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
702 | int out_alua_state, nonop_delay_msecs; | 687 | int out_alua_state, nonop_delay_msecs; |
703 | 688 | ||
704 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) | 689 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) |
@@ -706,33 +691,27 @@ target_alua_state_check(struct se_cmd *cmd) | |||
706 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) | 691 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
707 | return 0; | 692 | return 0; |
708 | 693 | ||
709 | if (!port) | ||
710 | return 0; | ||
711 | /* | 694 | /* |
712 | * First, check for a struct se_port specific secondary ALUA target port | 695 | * First, check for a struct se_port specific secondary ALUA target port |
713 | * access state: OFFLINE | 696 | * access state: OFFLINE |
714 | */ | 697 | */ |
715 | if (atomic_read(&port->sep_tg_pt_secondary_offline)) { | 698 | if (atomic_read(&lun->lun_tg_pt_secondary_offline)) { |
716 | pr_debug("ALUA: Got secondary offline status for local" | 699 | pr_debug("ALUA: Got secondary offline status for local" |
717 | " target port\n"); | 700 | " target port\n"); |
718 | set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); | 701 | set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); |
719 | return TCM_CHECK_CONDITION_NOT_READY; | 702 | return TCM_CHECK_CONDITION_NOT_READY; |
720 | } | 703 | } |
721 | /* | 704 | |
722 | * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the | 705 | if (!lun->lun_tg_pt_gp) |
723 | * ALUA target port group, to obtain current ALUA access state. | ||
724 | * Otherwise look for the underlying struct se_device association with | ||
725 | * a ALUA logical unit group. | ||
726 | */ | ||
727 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
728 | if (!tg_pt_gp_mem) | ||
729 | return 0; | 706 | return 0; |
730 | 707 | ||
731 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 708 | spin_lock(&lun->lun_tg_pt_gp_lock); |
732 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 709 | tg_pt_gp = lun->lun_tg_pt_gp; |
733 | out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); | 710 | out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); |
734 | nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; | 711 | nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; |
735 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 712 | |
713 | // XXX: keeps using tg_pt_gp witout reference after unlock | ||
714 | spin_unlock(&lun->lun_tg_pt_gp_lock); | ||
736 | /* | 715 | /* |
737 | * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional | 716 | * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional |
738 | * statement so the compiler knows explicitly to check this case first. | 717 | * statement so the compiler knows explicitly to check this case first. |
@@ -764,7 +743,7 @@ target_alua_state_check(struct se_cmd *cmd) | |||
764 | break; | 743 | break; |
765 | /* | 744 | /* |
766 | * OFFLINE is a secondary ALUA target port group access state, that is | 745 | * OFFLINE is a secondary ALUA target port group access state, that is |
767 | * handled above with struct se_port->sep_tg_pt_secondary_offline=1 | 746 | * handled above with struct se_lun->lun_tg_pt_secondary_offline=1 |
768 | */ | 747 | */ |
769 | case ALUA_ACCESS_STATE_OFFLINE: | 748 | case ALUA_ACCESS_STATE_OFFLINE: |
770 | default: | 749 | default: |
@@ -906,10 +885,6 @@ int core_alua_check_nonop_delay( | |||
906 | } | 885 | } |
907 | EXPORT_SYMBOL(core_alua_check_nonop_delay); | 886 | EXPORT_SYMBOL(core_alua_check_nonop_delay); |
908 | 887 | ||
909 | /* | ||
910 | * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex | ||
911 | * | ||
912 | */ | ||
913 | static int core_alua_write_tpg_metadata( | 888 | static int core_alua_write_tpg_metadata( |
914 | const char *path, | 889 | const char *path, |
915 | unsigned char *md_buf, | 890 | unsigned char *md_buf, |
@@ -965,22 +940,15 @@ static int core_alua_update_tpg_primary_metadata( | |||
965 | return rc; | 940 | return rc; |
966 | } | 941 | } |
967 | 942 | ||
968 | static void core_alua_do_transition_tg_pt_work(struct work_struct *work) | 943 | static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) |
969 | { | 944 | { |
970 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, | ||
971 | struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); | ||
972 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; | ||
973 | struct se_dev_entry *se_deve; | 945 | struct se_dev_entry *se_deve; |
946 | struct se_lun *lun; | ||
974 | struct se_lun_acl *lacl; | 947 | struct se_lun_acl *lacl; |
975 | struct se_port *port; | ||
976 | struct t10_alua_tg_pt_gp_member *mem; | ||
977 | bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == | ||
978 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); | ||
979 | 948 | ||
980 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 949 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
981 | list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, | 950 | list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, |
982 | tg_pt_gp_mem_list) { | 951 | lun_tg_pt_gp_link) { |
983 | port = mem->tg_pt; | ||
984 | /* | 952 | /* |
985 | * After an implicit target port asymmetric access state | 953 | * After an implicit target port asymmetric access state |
986 | * change, a device server shall establish a unit attention | 954 | * change, a device server shall establish a unit attention |
@@ -995,38 +963,58 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) | |||
995 | * every I_T nexus other than the I_T nexus on which the SET | 963 | * every I_T nexus other than the I_T nexus on which the SET |
996 | * TARGET PORT GROUPS command | 964 | * TARGET PORT GROUPS command |
997 | */ | 965 | */ |
998 | atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt); | 966 | if (!percpu_ref_tryget_live(&lun->lun_ref)) |
967 | continue; | ||
999 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | 968 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); |
1000 | 969 | ||
1001 | spin_lock_bh(&port->sep_alua_lock); | 970 | spin_lock(&lun->lun_deve_lock); |
1002 | list_for_each_entry(se_deve, &port->sep_alua_list, | 971 | list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) { |
1003 | alua_port_list) { | 972 | lacl = rcu_dereference_check(se_deve->se_lun_acl, |
1004 | lacl = se_deve->se_lun_acl; | 973 | lockdep_is_held(&lun->lun_deve_lock)); |
974 | |||
1005 | /* | 975 | /* |
1006 | * se_deve->se_lun_acl pointer may be NULL for a | 976 | * spc4r37 p.242: |
1007 | * entry created without explicit Node+MappedLUN ACLs | 977 | * After an explicit target port asymmetric access |
978 | * state change, a device server shall establish a | ||
979 | * unit attention condition with the additional sense | ||
980 | * code set to ASYMMETRIC ACCESS STATE CHANGED for | ||
981 | * the initiator port associated with every I_T nexus | ||
982 | * other than the I_T nexus on which the SET TARGET | ||
983 | * PORT GROUPS command was received. | ||
1008 | */ | 984 | */ |
1009 | if (!lacl) | ||
1010 | continue; | ||
1011 | |||
1012 | if ((tg_pt_gp->tg_pt_gp_alua_access_status == | 985 | if ((tg_pt_gp->tg_pt_gp_alua_access_status == |
1013 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && | 986 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && |
1014 | (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && | 987 | (tg_pt_gp->tg_pt_gp_alua_lun != NULL) && |
1015 | (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && | 988 | (tg_pt_gp->tg_pt_gp_alua_lun == lun)) |
1016 | (tg_pt_gp->tg_pt_gp_alua_port != NULL) && | ||
1017 | (tg_pt_gp->tg_pt_gp_alua_port == port)) | ||
1018 | continue; | 989 | continue; |
1019 | 990 | ||
1020 | core_scsi3_ua_allocate(lacl->se_lun_nacl, | 991 | /* |
1021 | se_deve->mapped_lun, 0x2A, | 992 | * se_deve->se_lun_acl pointer may be NULL for a |
993 | * entry created without explicit Node+MappedLUN ACLs | ||
994 | */ | ||
995 | if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && | ||
996 | (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl)) | ||
997 | continue; | ||
998 | |||
999 | core_scsi3_ua_allocate(se_deve, 0x2A, | ||
1022 | ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); | 1000 | ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); |
1023 | } | 1001 | } |
1024 | spin_unlock_bh(&port->sep_alua_lock); | 1002 | spin_unlock(&lun->lun_deve_lock); |
1025 | 1003 | ||
1026 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 1004 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
1027 | atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt); | 1005 | percpu_ref_put(&lun->lun_ref); |
1028 | } | 1006 | } |
1029 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | 1007 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); |
1008 | } | ||
1009 | |||
1010 | static void core_alua_do_transition_tg_pt_work(struct work_struct *work) | ||
1011 | { | ||
1012 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, | ||
1013 | struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); | ||
1014 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; | ||
1015 | bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == | ||
1016 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); | ||
1017 | |||
1030 | /* | 1018 | /* |
1031 | * Update the ALUA metadata buf that has been allocated in | 1019 | * Update the ALUA metadata buf that has been allocated in |
1032 | * core_alua_do_port_transition(), this metadata will be written | 1020 | * core_alua_do_port_transition(), this metadata will be written |
@@ -1056,6 +1044,9 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) | |||
1056 | tg_pt_gp->tg_pt_gp_id, | 1044 | tg_pt_gp->tg_pt_gp_id, |
1057 | core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), | 1045 | core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), |
1058 | core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); | 1046 | core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); |
1047 | |||
1048 | core_alua_queue_state_change_ua(tg_pt_gp); | ||
1049 | |||
1059 | spin_lock(&dev->t10_alua.tg_pt_gps_lock); | 1050 | spin_lock(&dev->t10_alua.tg_pt_gps_lock); |
1060 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); | 1051 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); |
1061 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); | 1052 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); |
@@ -1108,6 +1099,8 @@ static int core_alua_do_transition_tg_pt( | |||
1108 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : | 1099 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : |
1109 | ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; | 1100 | ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; |
1110 | 1101 | ||
1102 | core_alua_queue_state_change_ua(tg_pt_gp); | ||
1103 | |||
1111 | /* | 1104 | /* |
1112 | * Check for the optional ALUA primary state transition delay | 1105 | * Check for the optional ALUA primary state transition delay |
1113 | */ | 1106 | */ |
@@ -1142,7 +1135,7 @@ static int core_alua_do_transition_tg_pt( | |||
1142 | int core_alua_do_port_transition( | 1135 | int core_alua_do_port_transition( |
1143 | struct t10_alua_tg_pt_gp *l_tg_pt_gp, | 1136 | struct t10_alua_tg_pt_gp *l_tg_pt_gp, |
1144 | struct se_device *l_dev, | 1137 | struct se_device *l_dev, |
1145 | struct se_port *l_port, | 1138 | struct se_lun *l_lun, |
1146 | struct se_node_acl *l_nacl, | 1139 | struct se_node_acl *l_nacl, |
1147 | int new_state, | 1140 | int new_state, |
1148 | int explicit) | 1141 | int explicit) |
@@ -1172,7 +1165,7 @@ int core_alua_do_port_transition( | |||
1172 | * core_alua_do_transition_tg_pt() will always return | 1165 | * core_alua_do_transition_tg_pt() will always return |
1173 | * success. | 1166 | * success. |
1174 | */ | 1167 | */ |
1175 | l_tg_pt_gp->tg_pt_gp_alua_port = l_port; | 1168 | l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; |
1176 | l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; | 1169 | l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; |
1177 | rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, | 1170 | rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, |
1178 | new_state, explicit); | 1171 | new_state, explicit); |
@@ -1211,10 +1204,10 @@ int core_alua_do_port_transition( | |||
1211 | continue; | 1204 | continue; |
1212 | 1205 | ||
1213 | if (l_tg_pt_gp == tg_pt_gp) { | 1206 | if (l_tg_pt_gp == tg_pt_gp) { |
1214 | tg_pt_gp->tg_pt_gp_alua_port = l_port; | 1207 | tg_pt_gp->tg_pt_gp_alua_lun = l_lun; |
1215 | tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; | 1208 | tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; |
1216 | } else { | 1209 | } else { |
1217 | tg_pt_gp->tg_pt_gp_alua_port = NULL; | 1210 | tg_pt_gp->tg_pt_gp_alua_lun = NULL; |
1218 | tg_pt_gp->tg_pt_gp_alua_nacl = NULL; | 1211 | tg_pt_gp->tg_pt_gp_alua_nacl = NULL; |
1219 | } | 1212 | } |
1220 | atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); | 1213 | atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); |
@@ -1251,22 +1244,20 @@ int core_alua_do_port_transition( | |||
1251 | return rc; | 1244 | return rc; |
1252 | } | 1245 | } |
1253 | 1246 | ||
1254 | /* | 1247 | static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun) |
1255 | * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held | ||
1256 | */ | ||
1257 | static int core_alua_update_tpg_secondary_metadata( | ||
1258 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
1259 | struct se_port *port) | ||
1260 | { | 1248 | { |
1249 | struct se_portal_group *se_tpg = lun->lun_tpg; | ||
1261 | unsigned char *md_buf; | 1250 | unsigned char *md_buf; |
1262 | struct se_portal_group *se_tpg = port->sep_tpg; | ||
1263 | char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; | 1251 | char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; |
1264 | int len, rc; | 1252 | int len, rc; |
1265 | 1253 | ||
1254 | mutex_lock(&lun->lun_tg_pt_md_mutex); | ||
1255 | |||
1266 | md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); | 1256 | md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); |
1267 | if (!md_buf) { | 1257 | if (!md_buf) { |
1268 | pr_err("Unable to allocate buf for ALUA metadata\n"); | 1258 | pr_err("Unable to allocate buf for ALUA metadata\n"); |
1269 | return -ENOMEM; | 1259 | rc = -ENOMEM; |
1260 | goto out_unlock; | ||
1270 | } | 1261 | } |
1271 | 1262 | ||
1272 | memset(path, 0, ALUA_METADATA_PATH_LEN); | 1263 | memset(path, 0, ALUA_METADATA_PATH_LEN); |
@@ -1281,32 +1272,33 @@ static int core_alua_update_tpg_secondary_metadata( | |||
1281 | 1272 | ||
1282 | len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" | 1273 | len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" |
1283 | "alua_tg_pt_status=0x%02x\n", | 1274 | "alua_tg_pt_status=0x%02x\n", |
1284 | atomic_read(&port->sep_tg_pt_secondary_offline), | 1275 | atomic_read(&lun->lun_tg_pt_secondary_offline), |
1285 | port->sep_tg_pt_secondary_stat); | 1276 | lun->lun_tg_pt_secondary_stat); |
1286 | 1277 | ||
1287 | snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", | 1278 | snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu", |
1288 | se_tpg->se_tpg_tfo->get_fabric_name(), wwn, | 1279 | se_tpg->se_tpg_tfo->get_fabric_name(), wwn, |
1289 | port->sep_lun->unpacked_lun); | 1280 | lun->unpacked_lun); |
1290 | 1281 | ||
1291 | rc = core_alua_write_tpg_metadata(path, md_buf, len); | 1282 | rc = core_alua_write_tpg_metadata(path, md_buf, len); |
1292 | kfree(md_buf); | 1283 | kfree(md_buf); |
1293 | 1284 | ||
1285 | out_unlock: | ||
1286 | mutex_unlock(&lun->lun_tg_pt_md_mutex); | ||
1294 | return rc; | 1287 | return rc; |
1295 | } | 1288 | } |
1296 | 1289 | ||
1297 | static int core_alua_set_tg_pt_secondary_state( | 1290 | static int core_alua_set_tg_pt_secondary_state( |
1298 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | 1291 | struct se_lun *lun, |
1299 | struct se_port *port, | ||
1300 | int explicit, | 1292 | int explicit, |
1301 | int offline) | 1293 | int offline) |
1302 | { | 1294 | { |
1303 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1295 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
1304 | int trans_delay_msecs; | 1296 | int trans_delay_msecs; |
1305 | 1297 | ||
1306 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1298 | spin_lock(&lun->lun_tg_pt_gp_lock); |
1307 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 1299 | tg_pt_gp = lun->lun_tg_pt_gp; |
1308 | if (!tg_pt_gp) { | 1300 | if (!tg_pt_gp) { |
1309 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1301 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
1310 | pr_err("Unable to complete secondary state" | 1302 | pr_err("Unable to complete secondary state" |
1311 | " transition\n"); | 1303 | " transition\n"); |
1312 | return -EINVAL; | 1304 | return -EINVAL; |
@@ -1314,14 +1306,14 @@ static int core_alua_set_tg_pt_secondary_state( | |||
1314 | trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; | 1306 | trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; |
1315 | /* | 1307 | /* |
1316 | * Set the secondary ALUA target port access state to OFFLINE | 1308 | * Set the secondary ALUA target port access state to OFFLINE |
1317 | * or release the previously secondary state for struct se_port | 1309 | * or release the previously secondary state for struct se_lun |
1318 | */ | 1310 | */ |
1319 | if (offline) | 1311 | if (offline) |
1320 | atomic_set(&port->sep_tg_pt_secondary_offline, 1); | 1312 | atomic_set(&lun->lun_tg_pt_secondary_offline, 1); |
1321 | else | 1313 | else |
1322 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | 1314 | atomic_set(&lun->lun_tg_pt_secondary_offline, 0); |
1323 | 1315 | ||
1324 | port->sep_tg_pt_secondary_stat = (explicit) ? | 1316 | lun->lun_tg_pt_secondary_stat = (explicit) ? |
1325 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : | 1317 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : |
1326 | ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; | 1318 | ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; |
1327 | 1319 | ||
@@ -1330,7 +1322,7 @@ static int core_alua_set_tg_pt_secondary_state( | |||
1330 | "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), | 1322 | "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), |
1331 | tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); | 1323 | tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); |
1332 | 1324 | ||
1333 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1325 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
1334 | /* | 1326 | /* |
1335 | * Do the optional transition delay after we set the secondary | 1327 | * Do the optional transition delay after we set the secondary |
1336 | * ALUA access state. | 1328 | * ALUA access state. |
@@ -1341,11 +1333,8 @@ static int core_alua_set_tg_pt_secondary_state( | |||
1341 | * See if we need to update the ALUA fabric port metadata for | 1333 | * See if we need to update the ALUA fabric port metadata for |
1342 | * secondary state and status | 1334 | * secondary state and status |
1343 | */ | 1335 | */ |
1344 | if (port->sep_tg_pt_secondary_write_md) { | 1336 | if (lun->lun_tg_pt_secondary_write_md) |
1345 | mutex_lock(&port->sep_tg_pt_md_mutex); | 1337 | core_alua_update_tpg_secondary_metadata(lun); |
1346 | core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port); | ||
1347 | mutex_unlock(&port->sep_tg_pt_md_mutex); | ||
1348 | } | ||
1349 | 1338 | ||
1350 | return 0; | 1339 | return 0; |
1351 | } | 1340 | } |
@@ -1699,7 +1688,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, | |||
1699 | return NULL; | 1688 | return NULL; |
1700 | } | 1689 | } |
1701 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); | 1690 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); |
1702 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); | 1691 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); |
1703 | mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); | 1692 | mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); |
1704 | spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); | 1693 | spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); |
1705 | atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); | 1694 | atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); |
@@ -1793,32 +1782,11 @@ again: | |||
1793 | return 0; | 1782 | return 0; |
1794 | } | 1783 | } |
1795 | 1784 | ||
1796 | struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( | ||
1797 | struct se_port *port) | ||
1798 | { | ||
1799 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1800 | |||
1801 | tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, | ||
1802 | GFP_KERNEL); | ||
1803 | if (!tg_pt_gp_mem) { | ||
1804 | pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); | ||
1805 | return ERR_PTR(-ENOMEM); | ||
1806 | } | ||
1807 | INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); | ||
1808 | spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1809 | atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); | ||
1810 | |||
1811 | tg_pt_gp_mem->tg_pt = port; | ||
1812 | port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; | ||
1813 | |||
1814 | return tg_pt_gp_mem; | ||
1815 | } | ||
1816 | |||
1817 | void core_alua_free_tg_pt_gp( | 1785 | void core_alua_free_tg_pt_gp( |
1818 | struct t10_alua_tg_pt_gp *tg_pt_gp) | 1786 | struct t10_alua_tg_pt_gp *tg_pt_gp) |
1819 | { | 1787 | { |
1820 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; | 1788 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; |
1821 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; | 1789 | struct se_lun *lun, *next; |
1822 | 1790 | ||
1823 | /* | 1791 | /* |
1824 | * Once we have reached this point, config_item_put() has already | 1792 | * Once we have reached this point, config_item_put() has already |
@@ -1849,30 +1817,24 @@ void core_alua_free_tg_pt_gp( | |||
1849 | * struct se_port. | 1817 | * struct se_port. |
1850 | */ | 1818 | */ |
1851 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 1819 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
1852 | list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, | 1820 | list_for_each_entry_safe(lun, next, |
1853 | &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { | 1821 | &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) { |
1854 | if (tg_pt_gp_mem->tg_pt_gp_assoc) { | 1822 | list_del_init(&lun->lun_tg_pt_gp_link); |
1855 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | 1823 | tg_pt_gp->tg_pt_gp_members--; |
1856 | tg_pt_gp->tg_pt_gp_members--; | 1824 | |
1857 | tg_pt_gp_mem->tg_pt_gp_assoc = 0; | ||
1858 | } | ||
1859 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | 1825 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); |
1860 | /* | 1826 | /* |
1861 | * tg_pt_gp_mem is associated with a single | ||
1862 | * se_port->sep_alua_tg_pt_gp_mem, and is released via | ||
1863 | * core_alua_free_tg_pt_gp_mem(). | ||
1864 | * | ||
1865 | * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, | 1827 | * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, |
1866 | * assume we want to re-associate a given tg_pt_gp_mem with | 1828 | * assume we want to re-associate a given tg_pt_gp_mem with |
1867 | * default_tg_pt_gp. | 1829 | * default_tg_pt_gp. |
1868 | */ | 1830 | */ |
1869 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1831 | spin_lock(&lun->lun_tg_pt_gp_lock); |
1870 | if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { | 1832 | if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { |
1871 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 1833 | __target_attach_tg_pt_gp(lun, |
1872 | dev->t10_alua.default_tg_pt_gp); | 1834 | dev->t10_alua.default_tg_pt_gp); |
1873 | } else | 1835 | } else |
1874 | tg_pt_gp_mem->tg_pt_gp = NULL; | 1836 | lun->lun_tg_pt_gp = NULL; |
1875 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1837 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
1876 | 1838 | ||
1877 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 1839 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
1878 | } | 1840 | } |
@@ -1881,35 +1843,6 @@ void core_alua_free_tg_pt_gp( | |||
1881 | kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); | 1843 | kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); |
1882 | } | 1844 | } |
1883 | 1845 | ||
1884 | void core_alua_free_tg_pt_gp_mem(struct se_port *port) | ||
1885 | { | ||
1886 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
1887 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1888 | |||
1889 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
1890 | if (!tg_pt_gp_mem) | ||
1891 | return; | ||
1892 | |||
1893 | while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) | ||
1894 | cpu_relax(); | ||
1895 | |||
1896 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1897 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
1898 | if (tg_pt_gp) { | ||
1899 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
1900 | if (tg_pt_gp_mem->tg_pt_gp_assoc) { | ||
1901 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | ||
1902 | tg_pt_gp->tg_pt_gp_members--; | ||
1903 | tg_pt_gp_mem->tg_pt_gp_assoc = 0; | ||
1904 | } | ||
1905 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
1906 | tg_pt_gp_mem->tg_pt_gp = NULL; | ||
1907 | } | ||
1908 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1909 | |||
1910 | kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); | ||
1911 | } | ||
1912 | |||
1913 | static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( | 1846 | static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( |
1914 | struct se_device *dev, const char *name) | 1847 | struct se_device *dev, const char *name) |
1915 | { | 1848 | { |
@@ -1943,50 +1876,65 @@ static void core_alua_put_tg_pt_gp_from_name( | |||
1943 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); | 1876 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); |
1944 | } | 1877 | } |
1945 | 1878 | ||
1946 | /* | 1879 | static void __target_attach_tg_pt_gp(struct se_lun *lun, |
1947 | * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held | 1880 | struct t10_alua_tg_pt_gp *tg_pt_gp) |
1948 | */ | ||
1949 | void __core_alua_attach_tg_pt_gp_mem( | ||
1950 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
1951 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1952 | { | 1881 | { |
1882 | struct se_dev_entry *se_deve; | ||
1883 | |||
1884 | assert_spin_locked(&lun->lun_tg_pt_gp_lock); | ||
1885 | |||
1953 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 1886 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
1954 | tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; | 1887 | lun->lun_tg_pt_gp = tg_pt_gp; |
1955 | tg_pt_gp_mem->tg_pt_gp_assoc = 1; | 1888 | list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); |
1956 | list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, | ||
1957 | &tg_pt_gp->tg_pt_gp_mem_list); | ||
1958 | tg_pt_gp->tg_pt_gp_members++; | 1889 | tg_pt_gp->tg_pt_gp_members++; |
1890 | spin_lock(&lun->lun_deve_lock); | ||
1891 | list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) | ||
1892 | core_scsi3_ua_allocate(se_deve, 0x3f, | ||
1893 | ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED); | ||
1894 | spin_unlock(&lun->lun_deve_lock); | ||
1959 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | 1895 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); |
1960 | } | 1896 | } |
1961 | 1897 | ||
1962 | /* | 1898 | void target_attach_tg_pt_gp(struct se_lun *lun, |
1963 | * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held | 1899 | struct t10_alua_tg_pt_gp *tg_pt_gp) |
1964 | */ | ||
1965 | static void __core_alua_drop_tg_pt_gp_mem( | ||
1966 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
1967 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1968 | { | 1900 | { |
1901 | spin_lock(&lun->lun_tg_pt_gp_lock); | ||
1902 | __target_attach_tg_pt_gp(lun, tg_pt_gp); | ||
1903 | spin_unlock(&lun->lun_tg_pt_gp_lock); | ||
1904 | } | ||
1905 | |||
1906 | static void __target_detach_tg_pt_gp(struct se_lun *lun, | ||
1907 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1908 | { | ||
1909 | assert_spin_locked(&lun->lun_tg_pt_gp_lock); | ||
1910 | |||
1969 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 1911 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
1970 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | 1912 | list_del_init(&lun->lun_tg_pt_gp_link); |
1971 | tg_pt_gp_mem->tg_pt_gp = NULL; | ||
1972 | tg_pt_gp_mem->tg_pt_gp_assoc = 0; | ||
1973 | tg_pt_gp->tg_pt_gp_members--; | 1913 | tg_pt_gp->tg_pt_gp_members--; |
1974 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | 1914 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); |
1915 | |||
1916 | lun->lun_tg_pt_gp = NULL; | ||
1975 | } | 1917 | } |
1976 | 1918 | ||
1977 | ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) | 1919 | void target_detach_tg_pt_gp(struct se_lun *lun) |
1920 | { | ||
1921 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
1922 | |||
1923 | spin_lock(&lun->lun_tg_pt_gp_lock); | ||
1924 | tg_pt_gp = lun->lun_tg_pt_gp; | ||
1925 | if (tg_pt_gp) | ||
1926 | __target_detach_tg_pt_gp(lun, tg_pt_gp); | ||
1927 | spin_unlock(&lun->lun_tg_pt_gp_lock); | ||
1928 | } | ||
1929 | |||
1930 | ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) | ||
1978 | { | 1931 | { |
1979 | struct config_item *tg_pt_ci; | 1932 | struct config_item *tg_pt_ci; |
1980 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1933 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
1981 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1982 | ssize_t len = 0; | 1934 | ssize_t len = 0; |
1983 | 1935 | ||
1984 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 1936 | spin_lock(&lun->lun_tg_pt_gp_lock); |
1985 | if (!tg_pt_gp_mem) | 1937 | tg_pt_gp = lun->lun_tg_pt_gp; |
1986 | return len; | ||
1987 | |||
1988 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1989 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
1990 | if (tg_pt_gp) { | 1938 | if (tg_pt_gp) { |
1991 | tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; | 1939 | tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; |
1992 | len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" | 1940 | len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" |
@@ -1998,34 +1946,33 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) | |||
1998 | &tg_pt_gp->tg_pt_gp_alua_access_state)), | 1946 | &tg_pt_gp->tg_pt_gp_alua_access_state)), |
1999 | core_alua_dump_status( | 1947 | core_alua_dump_status( |
2000 | tg_pt_gp->tg_pt_gp_alua_access_status), | 1948 | tg_pt_gp->tg_pt_gp_alua_access_status), |
2001 | (atomic_read(&port->sep_tg_pt_secondary_offline)) ? | 1949 | atomic_read(&lun->lun_tg_pt_secondary_offline) ? |
2002 | "Offline" : "None", | 1950 | "Offline" : "None", |
2003 | core_alua_dump_status(port->sep_tg_pt_secondary_stat)); | 1951 | core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); |
2004 | } | 1952 | } |
2005 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1953 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
2006 | 1954 | ||
2007 | return len; | 1955 | return len; |
2008 | } | 1956 | } |
2009 | 1957 | ||
2010 | ssize_t core_alua_store_tg_pt_gp_info( | 1958 | ssize_t core_alua_store_tg_pt_gp_info( |
2011 | struct se_port *port, | 1959 | struct se_lun *lun, |
2012 | const char *page, | 1960 | const char *page, |
2013 | size_t count) | 1961 | size_t count) |
2014 | { | 1962 | { |
2015 | struct se_portal_group *tpg; | 1963 | struct se_portal_group *tpg = lun->lun_tpg; |
2016 | struct se_lun *lun; | 1964 | /* |
2017 | struct se_device *dev = port->sep_lun->lun_se_dev; | 1965 | * rcu_dereference_raw protected by se_lun->lun_group symlink |
1966 | * reference to se_device->dev_group. | ||
1967 | */ | ||
1968 | struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); | ||
2018 | struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; | 1969 | struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; |
2019 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
2020 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; | 1970 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; |
2021 | int move = 0; | 1971 | int move = 0; |
2022 | 1972 | ||
2023 | tpg = port->sep_tpg; | 1973 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || |
2024 | lun = port->sep_lun; | 1974 | (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
2025 | 1975 | return -ENODEV; | |
2026 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
2027 | if (!tg_pt_gp_mem) | ||
2028 | return 0; | ||
2029 | 1976 | ||
2030 | if (count > TG_PT_GROUP_NAME_BUF) { | 1977 | if (count > TG_PT_GROUP_NAME_BUF) { |
2031 | pr_err("ALUA Target Port Group alias too large!\n"); | 1978 | pr_err("ALUA Target Port Group alias too large!\n"); |
@@ -2049,8 +1996,8 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
2049 | return -ENODEV; | 1996 | return -ENODEV; |
2050 | } | 1997 | } |
2051 | 1998 | ||
2052 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1999 | spin_lock(&lun->lun_tg_pt_gp_lock); |
2053 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 2000 | tg_pt_gp = lun->lun_tg_pt_gp; |
2054 | if (tg_pt_gp) { | 2001 | if (tg_pt_gp) { |
2055 | /* | 2002 | /* |
2056 | * Clearing an existing tg_pt_gp association, and replacing | 2003 | * Clearing an existing tg_pt_gp association, and replacing |
@@ -2068,24 +2015,19 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
2068 | &tg_pt_gp->tg_pt_gp_group.cg_item), | 2015 | &tg_pt_gp->tg_pt_gp_group.cg_item), |
2069 | tg_pt_gp->tg_pt_gp_id); | 2016 | tg_pt_gp->tg_pt_gp_id); |
2070 | 2017 | ||
2071 | __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); | 2018 | __target_detach_tg_pt_gp(lun, tg_pt_gp); |
2072 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 2019 | __target_attach_tg_pt_gp(lun, |
2073 | dev->t10_alua.default_tg_pt_gp); | 2020 | dev->t10_alua.default_tg_pt_gp); |
2074 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 2021 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
2075 | 2022 | ||
2076 | return count; | 2023 | return count; |
2077 | } | 2024 | } |
2078 | /* | 2025 | __target_detach_tg_pt_gp(lun, tg_pt_gp); |
2079 | * Removing existing association of tg_pt_gp_mem with tg_pt_gp | ||
2080 | */ | ||
2081 | __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); | ||
2082 | move = 1; | 2026 | move = 1; |
2083 | } | 2027 | } |
2084 | /* | 2028 | |
2085 | * Associate tg_pt_gp_mem with tg_pt_gp_new. | 2029 | __target_attach_tg_pt_gp(lun, tg_pt_gp_new); |
2086 | */ | 2030 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
2087 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); | ||
2088 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
2089 | pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" | 2031 | pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" |
2090 | " Target Port Group: alua/%s, ID: %hu\n", (move) ? | 2032 | " Target Port Group: alua/%s, ID: %hu\n", (move) ? |
2091 | "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), | 2033 | "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
@@ -2268,11 +2210,8 @@ ssize_t core_alua_store_preferred_bit( | |||
2268 | 2210 | ||
2269 | ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) | 2211 | ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) |
2270 | { | 2212 | { |
2271 | if (!lun->lun_sep) | ||
2272 | return -ENODEV; | ||
2273 | |||
2274 | return sprintf(page, "%d\n", | 2213 | return sprintf(page, "%d\n", |
2275 | atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); | 2214 | atomic_read(&lun->lun_tg_pt_secondary_offline)); |
2276 | } | 2215 | } |
2277 | 2216 | ||
2278 | ssize_t core_alua_store_offline_bit( | 2217 | ssize_t core_alua_store_offline_bit( |
@@ -2280,11 +2219,16 @@ ssize_t core_alua_store_offline_bit( | |||
2280 | const char *page, | 2219 | const char *page, |
2281 | size_t count) | 2220 | size_t count) |
2282 | { | 2221 | { |
2283 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | 2222 | /* |
2223 | * rcu_dereference_raw protected by se_lun->lun_group symlink | ||
2224 | * reference to se_device->dev_group. | ||
2225 | */ | ||
2226 | struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); | ||
2284 | unsigned long tmp; | 2227 | unsigned long tmp; |
2285 | int ret; | 2228 | int ret; |
2286 | 2229 | ||
2287 | if (!lun->lun_sep) | 2230 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || |
2231 | (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | ||
2288 | return -ENODEV; | 2232 | return -ENODEV; |
2289 | 2233 | ||
2290 | ret = kstrtoul(page, 0, &tmp); | 2234 | ret = kstrtoul(page, 0, &tmp); |
@@ -2297,14 +2241,8 @@ ssize_t core_alua_store_offline_bit( | |||
2297 | tmp); | 2241 | tmp); |
2298 | return -EINVAL; | 2242 | return -EINVAL; |
2299 | } | 2243 | } |
2300 | tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; | ||
2301 | if (!tg_pt_gp_mem) { | ||
2302 | pr_err("Unable to locate *tg_pt_gp_mem\n"); | ||
2303 | return -EINVAL; | ||
2304 | } | ||
2305 | 2244 | ||
2306 | ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, | 2245 | ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp); |
2307 | lun->lun_sep, 0, (int)tmp); | ||
2308 | if (ret < 0) | 2246 | if (ret < 0) |
2309 | return -EINVAL; | 2247 | return -EINVAL; |
2310 | 2248 | ||
@@ -2315,7 +2253,7 @@ ssize_t core_alua_show_secondary_status( | |||
2315 | struct se_lun *lun, | 2253 | struct se_lun *lun, |
2316 | char *page) | 2254 | char *page) |
2317 | { | 2255 | { |
2318 | return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); | 2256 | return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat); |
2319 | } | 2257 | } |
2320 | 2258 | ||
2321 | ssize_t core_alua_store_secondary_status( | 2259 | ssize_t core_alua_store_secondary_status( |
@@ -2338,7 +2276,7 @@ ssize_t core_alua_store_secondary_status( | |||
2338 | tmp); | 2276 | tmp); |
2339 | return -EINVAL; | 2277 | return -EINVAL; |
2340 | } | 2278 | } |
2341 | lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; | 2279 | lun->lun_tg_pt_secondary_stat = (int)tmp; |
2342 | 2280 | ||
2343 | return count; | 2281 | return count; |
2344 | } | 2282 | } |
@@ -2347,8 +2285,7 @@ ssize_t core_alua_show_secondary_write_metadata( | |||
2347 | struct se_lun *lun, | 2285 | struct se_lun *lun, |
2348 | char *page) | 2286 | char *page) |
2349 | { | 2287 | { |
2350 | return sprintf(page, "%d\n", | 2288 | return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md); |
2351 | lun->lun_sep->sep_tg_pt_secondary_write_md); | ||
2352 | } | 2289 | } |
2353 | 2290 | ||
2354 | ssize_t core_alua_store_secondary_write_metadata( | 2291 | ssize_t core_alua_store_secondary_write_metadata( |
@@ -2369,7 +2306,7 @@ ssize_t core_alua_store_secondary_write_metadata( | |||
2369 | " %lu\n", tmp); | 2306 | " %lu\n", tmp); |
2370 | return -EINVAL; | 2307 | return -EINVAL; |
2371 | } | 2308 | } |
2372 | lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; | 2309 | lun->lun_tg_pt_secondary_write_md = (int)tmp; |
2373 | 2310 | ||
2374 | return count; | 2311 | return count; |
2375 | } | 2312 | } |
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index 0a7d65e80404..9b250f9b33bf 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h | |||
@@ -85,7 +85,6 @@ | |||
85 | extern struct kmem_cache *t10_alua_lu_gp_cache; | 85 | extern struct kmem_cache *t10_alua_lu_gp_cache; |
86 | extern struct kmem_cache *t10_alua_lu_gp_mem_cache; | 86 | extern struct kmem_cache *t10_alua_lu_gp_mem_cache; |
87 | extern struct kmem_cache *t10_alua_tg_pt_gp_cache; | 87 | extern struct kmem_cache *t10_alua_tg_pt_gp_cache; |
88 | extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | ||
89 | extern struct kmem_cache *t10_alua_lba_map_cache; | 88 | extern struct kmem_cache *t10_alua_lba_map_cache; |
90 | extern struct kmem_cache *t10_alua_lba_map_mem_cache; | 89 | extern struct kmem_cache *t10_alua_lba_map_mem_cache; |
91 | 90 | ||
@@ -94,7 +93,7 @@ extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); | |||
94 | extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); | 93 | extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); |
95 | extern int core_alua_check_nonop_delay(struct se_cmd *); | 94 | extern int core_alua_check_nonop_delay(struct se_cmd *); |
96 | extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, | 95 | extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, |
97 | struct se_device *, struct se_port *, | 96 | struct se_device *, struct se_lun *, |
98 | struct se_node_acl *, int, int); | 97 | struct se_node_acl *, int, int); |
99 | extern char *core_alua_dump_status(int); | 98 | extern char *core_alua_dump_status(int); |
100 | extern struct t10_alua_lba_map *core_alua_allocate_lba_map( | 99 | extern struct t10_alua_lba_map *core_alua_allocate_lba_map( |
@@ -117,14 +116,11 @@ extern void core_alua_drop_lu_gp_dev(struct se_device *); | |||
117 | extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( | 116 | extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( |
118 | struct se_device *, const char *, int); | 117 | struct se_device *, const char *, int); |
119 | extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); | 118 | extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); |
120 | extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( | ||
121 | struct se_port *); | ||
122 | extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *); | 119 | extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *); |
123 | extern void core_alua_free_tg_pt_gp_mem(struct se_port *); | 120 | extern void target_detach_tg_pt_gp(struct se_lun *); |
124 | extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *, | 121 | extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *); |
125 | struct t10_alua_tg_pt_gp *); | 122 | extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *); |
126 | extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *); | 123 | extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *, |
127 | extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *, | ||
128 | size_t); | 124 | size_t); |
129 | extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *); | 125 | extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *); |
130 | extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *, | 126 | extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *, |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index e7b0430a0575..0b0de3647478 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <target/target_core_backend.h> | 41 | #include <target/target_core_backend.h> |
42 | #include <target/target_core_fabric.h> | 42 | #include <target/target_core_fabric.h> |
43 | #include <target/target_core_fabric_configfs.h> | 43 | #include <target/target_core_fabric_configfs.h> |
44 | #include <target/target_core_configfs.h> | ||
45 | #include <target/configfs_macros.h> | 44 | #include <target/configfs_macros.h> |
46 | 45 | ||
47 | #include "target_core_internal.h" | 46 | #include "target_core_internal.h" |
@@ -51,15 +50,26 @@ | |||
51 | #include "target_core_xcopy.h" | 50 | #include "target_core_xcopy.h" |
52 | 51 | ||
53 | #define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ | 52 | #define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ |
54 | static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \ | 53 | static void target_core_setup_##_name##_cit(struct target_backend *tb) \ |
55 | { \ | 54 | { \ |
56 | struct target_backend_cits *tbc = &sa->tb_cits; \ | 55 | struct config_item_type *cit = &tb->tb_##_name##_cit; \ |
57 | struct config_item_type *cit = &tbc->tb_##_name##_cit; \ | ||
58 | \ | 56 | \ |
59 | cit->ct_item_ops = _item_ops; \ | 57 | cit->ct_item_ops = _item_ops; \ |
60 | cit->ct_group_ops = _group_ops; \ | 58 | cit->ct_group_ops = _group_ops; \ |
61 | cit->ct_attrs = _attrs; \ | 59 | cit->ct_attrs = _attrs; \ |
62 | cit->ct_owner = sa->owner; \ | 60 | cit->ct_owner = tb->ops->owner; \ |
61 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | ||
62 | } | ||
63 | |||
64 | #define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ | ||
65 | static void target_core_setup_##_name##_cit(struct target_backend *tb) \ | ||
66 | { \ | ||
67 | struct config_item_type *cit = &tb->tb_##_name##_cit; \ | ||
68 | \ | ||
69 | cit->ct_item_ops = _item_ops; \ | ||
70 | cit->ct_group_ops = _group_ops; \ | ||
71 | cit->ct_attrs = tb->ops->tb_##_name##_attrs; \ | ||
72 | cit->ct_owner = tb->ops->owner; \ | ||
63 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | 73 | pr_debug("Setup generic %s\n", __stringify(_name)); \ |
64 | } | 74 | } |
65 | 75 | ||
@@ -92,7 +102,7 @@ static ssize_t target_core_attr_show(struct config_item *item, | |||
92 | char *page) | 102 | char *page) |
93 | { | 103 | { |
94 | return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" | 104 | return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" |
95 | " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION, | 105 | " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, |
96 | utsname()->sysname, utsname()->machine); | 106 | utsname()->sysname, utsname()->machine); |
97 | } | 107 | } |
98 | 108 | ||
@@ -116,7 +126,7 @@ static struct target_fabric_configfs *target_core_get_fabric( | |||
116 | 126 | ||
117 | mutex_lock(&g_tf_lock); | 127 | mutex_lock(&g_tf_lock); |
118 | list_for_each_entry(tf, &g_tf_list, tf_list) { | 128 | list_for_each_entry(tf, &g_tf_list, tf_list) { |
119 | if (!strcmp(tf->tf_name, name)) { | 129 | if (!strcmp(tf->tf_ops->name, name)) { |
120 | atomic_inc(&tf->tf_access_cnt); | 130 | atomic_inc(&tf->tf_access_cnt); |
121 | mutex_unlock(&g_tf_lock); | 131 | mutex_unlock(&g_tf_lock); |
122 | return tf; | 132 | return tf; |
@@ -193,29 +203,24 @@ static struct config_group *target_core_register_fabric( | |||
193 | return ERR_PTR(-EINVAL); | 203 | return ERR_PTR(-EINVAL); |
194 | } | 204 | } |
195 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" | 205 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" |
196 | " %s\n", tf->tf_name); | 206 | " %s\n", tf->tf_ops->name); |
197 | /* | 207 | /* |
198 | * On a successful target_core_get_fabric() look, the returned | 208 | * On a successful target_core_get_fabric() look, the returned |
199 | * struct target_fabric_configfs *tf will contain a usage reference. | 209 | * struct target_fabric_configfs *tf will contain a usage reference. |
200 | */ | 210 | */ |
201 | pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", | 211 | pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", |
202 | &tf->tf_cit_tmpl.tfc_wwn_cit); | 212 | &tf->tf_wwn_cit); |
203 | 213 | ||
204 | tf->tf_group.default_groups = tf->tf_default_groups; | 214 | tf->tf_group.default_groups = tf->tf_default_groups; |
205 | tf->tf_group.default_groups[0] = &tf->tf_disc_group; | 215 | tf->tf_group.default_groups[0] = &tf->tf_disc_group; |
206 | tf->tf_group.default_groups[1] = NULL; | 216 | tf->tf_group.default_groups[1] = NULL; |
207 | 217 | ||
208 | config_group_init_type_name(&tf->tf_group, name, | 218 | config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit); |
209 | &tf->tf_cit_tmpl.tfc_wwn_cit); | ||
210 | config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", | 219 | config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", |
211 | &tf->tf_cit_tmpl.tfc_discovery_cit); | 220 | &tf->tf_discovery_cit); |
212 | 221 | ||
213 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" | 222 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" |
214 | " %s\n", tf->tf_group.cg_item.ci_name); | 223 | " %s\n", tf->tf_group.cg_item.ci_name); |
215 | tf->tf_fabric = &tf->tf_group.cg_item; | ||
216 | pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" | ||
217 | " for %s\n", name); | ||
218 | |||
219 | return &tf->tf_group; | 224 | return &tf->tf_group; |
220 | } | 225 | } |
221 | 226 | ||
@@ -236,13 +241,9 @@ static void target_core_deregister_fabric( | |||
236 | " tf list\n", config_item_name(item)); | 241 | " tf list\n", config_item_name(item)); |
237 | 242 | ||
238 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" | 243 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" |
239 | " %s\n", tf->tf_name); | 244 | " %s\n", tf->tf_ops->name); |
240 | atomic_dec(&tf->tf_access_cnt); | 245 | atomic_dec(&tf->tf_access_cnt); |
241 | 246 | ||
242 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" | ||
243 | " tf->tf_fabric for %s\n", tf->tf_name); | ||
244 | tf->tf_fabric = NULL; | ||
245 | |||
246 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" | 247 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" |
247 | " %s\n", config_item_name(item)); | 248 | " %s\n", config_item_name(item)); |
248 | 249 | ||
@@ -318,10 +319,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) | |||
318 | pr_err("Missing tfo->get_fabric_name()\n"); | 319 | pr_err("Missing tfo->get_fabric_name()\n"); |
319 | return -EINVAL; | 320 | return -EINVAL; |
320 | } | 321 | } |
321 | if (!tfo->get_fabric_proto_ident) { | ||
322 | pr_err("Missing tfo->get_fabric_proto_ident()\n"); | ||
323 | return -EINVAL; | ||
324 | } | ||
325 | if (!tfo->tpg_get_wwn) { | 322 | if (!tfo->tpg_get_wwn) { |
326 | pr_err("Missing tfo->tpg_get_wwn()\n"); | 323 | pr_err("Missing tfo->tpg_get_wwn()\n"); |
327 | return -EINVAL; | 324 | return -EINVAL; |
@@ -330,18 +327,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) | |||
330 | pr_err("Missing tfo->tpg_get_tag()\n"); | 327 | pr_err("Missing tfo->tpg_get_tag()\n"); |
331 | return -EINVAL; | 328 | return -EINVAL; |
332 | } | 329 | } |
333 | if (!tfo->tpg_get_default_depth) { | ||
334 | pr_err("Missing tfo->tpg_get_default_depth()\n"); | ||
335 | return -EINVAL; | ||
336 | } | ||
337 | if (!tfo->tpg_get_pr_transport_id) { | ||
338 | pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); | ||
339 | return -EINVAL; | ||
340 | } | ||
341 | if (!tfo->tpg_get_pr_transport_id_len) { | ||
342 | pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); | ||
343 | return -EINVAL; | ||
344 | } | ||
345 | if (!tfo->tpg_check_demo_mode) { | 330 | if (!tfo->tpg_check_demo_mode) { |
346 | pr_err("Missing tfo->tpg_check_demo_mode()\n"); | 331 | pr_err("Missing tfo->tpg_check_demo_mode()\n"); |
347 | return -EINVAL; | 332 | return -EINVAL; |
@@ -358,14 +343,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) | |||
358 | pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); | 343 | pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); |
359 | return -EINVAL; | 344 | return -EINVAL; |
360 | } | 345 | } |
361 | if (!tfo->tpg_alloc_fabric_acl) { | ||
362 | pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); | ||
363 | return -EINVAL; | ||
364 | } | ||
365 | if (!tfo->tpg_release_fabric_acl) { | ||
366 | pr_err("Missing tfo->tpg_release_fabric_acl()\n"); | ||
367 | return -EINVAL; | ||
368 | } | ||
369 | if (!tfo->tpg_get_inst_index) { | 346 | if (!tfo->tpg_get_inst_index) { |
370 | pr_err("Missing tfo->tpg_get_inst_index()\n"); | 347 | pr_err("Missing tfo->tpg_get_inst_index()\n"); |
371 | return -EINVAL; | 348 | return -EINVAL; |
@@ -398,10 +375,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) | |||
398 | pr_err("Missing tfo->set_default_node_attributes()\n"); | 375 | pr_err("Missing tfo->set_default_node_attributes()\n"); |
399 | return -EINVAL; | 376 | return -EINVAL; |
400 | } | 377 | } |
401 | if (!tfo->get_task_tag) { | ||
402 | pr_err("Missing tfo->get_task_tag()\n"); | ||
403 | return -EINVAL; | ||
404 | } | ||
405 | if (!tfo->get_cmd_state) { | 378 | if (!tfo->get_cmd_state) { |
406 | pr_err("Missing tfo->get_cmd_state()\n"); | 379 | pr_err("Missing tfo->get_cmd_state()\n"); |
407 | return -EINVAL; | 380 | return -EINVAL; |
@@ -464,15 +437,7 @@ int target_register_template(const struct target_core_fabric_ops *fo) | |||
464 | 437 | ||
465 | INIT_LIST_HEAD(&tf->tf_list); | 438 | INIT_LIST_HEAD(&tf->tf_list); |
466 | atomic_set(&tf->tf_access_cnt, 0); | 439 | atomic_set(&tf->tf_access_cnt, 0); |
467 | 440 | tf->tf_ops = fo; | |
468 | /* | ||
469 | * Setup the default generic struct config_item_type's (cits) in | ||
470 | * struct target_fabric_configfs->tf_cit_tmpl | ||
471 | */ | ||
472 | tf->tf_module = fo->module; | ||
473 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); | ||
474 | |||
475 | tf->tf_ops = *fo; | ||
476 | target_fabric_setup_cits(tf); | 441 | target_fabric_setup_cits(tf); |
477 | 442 | ||
478 | mutex_lock(&g_tf_lock); | 443 | mutex_lock(&g_tf_lock); |
@@ -489,7 +454,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo) | |||
489 | 454 | ||
490 | mutex_lock(&g_tf_lock); | 455 | mutex_lock(&g_tf_lock); |
491 | list_for_each_entry(t, &g_tf_list, tf_list) { | 456 | list_for_each_entry(t, &g_tf_list, tf_list) { |
492 | if (!strcmp(t->tf_name, fo->name)) { | 457 | if (!strcmp(t->tf_ops->name, fo->name)) { |
493 | BUG_ON(atomic_read(&t->tf_access_cnt)); | 458 | BUG_ON(atomic_read(&t->tf_access_cnt)); |
494 | list_del(&t->tf_list); | 459 | list_del(&t->tf_list); |
495 | kfree(t); | 460 | kfree(t); |
@@ -505,16 +470,605 @@ EXPORT_SYMBOL(target_unregister_template); | |||
505 | //############################################################################*/ | 470 | //############################################################################*/ |
506 | 471 | ||
507 | /* Start functions for struct config_item_type tb_dev_attrib_cit */ | 472 | /* Start functions for struct config_item_type tb_dev_attrib_cit */ |
473 | #define DEF_TB_DEV_ATTRIB_SHOW(_name) \ | ||
474 | static ssize_t show_##_name(struct se_dev_attrib *da, char *page) \ | ||
475 | { \ | ||
476 | return snprintf(page, PAGE_SIZE, "%u\n", da->_name); \ | ||
477 | } | ||
478 | |||
479 | DEF_TB_DEV_ATTRIB_SHOW(emulate_model_alias); | ||
480 | DEF_TB_DEV_ATTRIB_SHOW(emulate_dpo); | ||
481 | DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_write); | ||
482 | DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_read); | ||
483 | DEF_TB_DEV_ATTRIB_SHOW(emulate_write_cache); | ||
484 | DEF_TB_DEV_ATTRIB_SHOW(emulate_ua_intlck_ctrl); | ||
485 | DEF_TB_DEV_ATTRIB_SHOW(emulate_tas); | ||
486 | DEF_TB_DEV_ATTRIB_SHOW(emulate_tpu); | ||
487 | DEF_TB_DEV_ATTRIB_SHOW(emulate_tpws); | ||
488 | DEF_TB_DEV_ATTRIB_SHOW(emulate_caw); | ||
489 | DEF_TB_DEV_ATTRIB_SHOW(emulate_3pc); | ||
490 | DEF_TB_DEV_ATTRIB_SHOW(pi_prot_type); | ||
491 | DEF_TB_DEV_ATTRIB_SHOW(hw_pi_prot_type); | ||
492 | DEF_TB_DEV_ATTRIB_SHOW(pi_prot_format); | ||
493 | DEF_TB_DEV_ATTRIB_SHOW(enforce_pr_isids); | ||
494 | DEF_TB_DEV_ATTRIB_SHOW(is_nonrot); | ||
495 | DEF_TB_DEV_ATTRIB_SHOW(emulate_rest_reord); | ||
496 | DEF_TB_DEV_ATTRIB_SHOW(force_pr_aptpl); | ||
497 | DEF_TB_DEV_ATTRIB_SHOW(hw_block_size); | ||
498 | DEF_TB_DEV_ATTRIB_SHOW(block_size); | ||
499 | DEF_TB_DEV_ATTRIB_SHOW(hw_max_sectors); | ||
500 | DEF_TB_DEV_ATTRIB_SHOW(optimal_sectors); | ||
501 | DEF_TB_DEV_ATTRIB_SHOW(hw_queue_depth); | ||
502 | DEF_TB_DEV_ATTRIB_SHOW(queue_depth); | ||
503 | DEF_TB_DEV_ATTRIB_SHOW(max_unmap_lba_count); | ||
504 | DEF_TB_DEV_ATTRIB_SHOW(max_unmap_block_desc_count); | ||
505 | DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity); | ||
506 | DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity_alignment); | ||
507 | DEF_TB_DEV_ATTRIB_SHOW(max_write_same_len); | ||
508 | |||
509 | #define DEF_TB_DEV_ATTRIB_STORE_U32(_name) \ | ||
510 | static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\ | ||
511 | size_t count) \ | ||
512 | { \ | ||
513 | u32 val; \ | ||
514 | int ret; \ | ||
515 | \ | ||
516 | ret = kstrtou32(page, 0, &val); \ | ||
517 | if (ret < 0) \ | ||
518 | return ret; \ | ||
519 | da->_name = val; \ | ||
520 | return count; \ | ||
521 | } | ||
522 | |||
523 | DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_lba_count); | ||
524 | DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_block_desc_count); | ||
525 | DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity); | ||
526 | DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity_alignment); | ||
527 | DEF_TB_DEV_ATTRIB_STORE_U32(max_write_same_len); | ||
528 | |||
529 | #define DEF_TB_DEV_ATTRIB_STORE_BOOL(_name) \ | ||
530 | static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\ | ||
531 | size_t count) \ | ||
532 | { \ | ||
533 | bool flag; \ | ||
534 | int ret; \ | ||
535 | \ | ||
536 | ret = strtobool(page, &flag); \ | ||
537 | if (ret < 0) \ | ||
538 | return ret; \ | ||
539 | da->_name = flag; \ | ||
540 | return count; \ | ||
541 | } | ||
542 | |||
543 | DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_fua_write); | ||
544 | DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_caw); | ||
545 | DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_3pc); | ||
546 | DEF_TB_DEV_ATTRIB_STORE_BOOL(enforce_pr_isids); | ||
547 | DEF_TB_DEV_ATTRIB_STORE_BOOL(is_nonrot); | ||
548 | |||
549 | #define DEF_TB_DEV_ATTRIB_STORE_STUB(_name) \ | ||
550 | static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\ | ||
551 | size_t count) \ | ||
552 | { \ | ||
553 | printk_once(KERN_WARNING \ | ||
554 | "ignoring deprecated ##_name## attribute\n"); \ | ||
555 | return count; \ | ||
556 | } | ||
557 | |||
558 | DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_dpo); | ||
559 | DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_fua_read); | ||
560 | |||
561 | static void dev_set_t10_wwn_model_alias(struct se_device *dev) | ||
562 | { | ||
563 | const char *configname; | ||
564 | |||
565 | configname = config_item_name(&dev->dev_group.cg_item); | ||
566 | if (strlen(configname) >= 16) { | ||
567 | pr_warn("dev[%p]: Backstore name '%s' is too long for " | ||
568 | "INQUIRY_MODEL, truncating to 16 bytes\n", dev, | ||
569 | configname); | ||
570 | } | ||
571 | snprintf(&dev->t10_wwn.model[0], 16, "%s", configname); | ||
572 | } | ||
573 | |||
574 | static ssize_t store_emulate_model_alias(struct se_dev_attrib *da, | ||
575 | const char *page, size_t count) | ||
576 | { | ||
577 | struct se_device *dev = da->da_dev; | ||
578 | bool flag; | ||
579 | int ret; | ||
580 | |||
581 | if (dev->export_count) { | ||
582 | pr_err("dev[%p]: Unable to change model alias" | ||
583 | " while export_count is %d\n", | ||
584 | dev, dev->export_count); | ||
585 | return -EINVAL; | ||
586 | } | ||
587 | |||
588 | ret = strtobool(page, &flag); | ||
589 | if (ret < 0) | ||
590 | return ret; | ||
591 | |||
592 | if (flag) { | ||
593 | dev_set_t10_wwn_model_alias(dev); | ||
594 | } else { | ||
595 | strncpy(&dev->t10_wwn.model[0], | ||
596 | dev->transport->inquiry_prod, 16); | ||
597 | } | ||
598 | da->emulate_model_alias = flag; | ||
599 | return count; | ||
600 | } | ||
601 | |||
602 | static ssize_t store_emulate_write_cache(struct se_dev_attrib *da, | ||
603 | const char *page, size_t count) | ||
604 | { | ||
605 | bool flag; | ||
606 | int ret; | ||
607 | |||
608 | ret = strtobool(page, &flag); | ||
609 | if (ret < 0) | ||
610 | return ret; | ||
611 | |||
612 | if (flag && da->da_dev->transport->get_write_cache) { | ||
613 | pr_err("emulate_write_cache not supported for this device\n"); | ||
614 | return -EINVAL; | ||
615 | } | ||
616 | |||
617 | da->emulate_write_cache = flag; | ||
618 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | ||
619 | da->da_dev, flag); | ||
620 | return count; | ||
621 | } | ||
622 | |||
623 | static ssize_t store_emulate_ua_intlck_ctrl(struct se_dev_attrib *da, | ||
624 | const char *page, size_t count) | ||
625 | { | ||
626 | u32 val; | ||
627 | int ret; | ||
628 | |||
629 | ret = kstrtou32(page, 0, &val); | ||
630 | if (ret < 0) | ||
631 | return ret; | ||
632 | |||
633 | if (val != 0 && val != 1 && val != 2) { | ||
634 | pr_err("Illegal value %d\n", val); | ||
635 | return -EINVAL; | ||
636 | } | ||
637 | |||
638 | if (da->da_dev->export_count) { | ||
639 | pr_err("dev[%p]: Unable to change SE Device" | ||
640 | " UA_INTRLCK_CTRL while export_count is %d\n", | ||
641 | da->da_dev, da->da_dev->export_count); | ||
642 | return -EINVAL; | ||
643 | } | ||
644 | da->emulate_ua_intlck_ctrl = val; | ||
645 | pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | ||
646 | da->da_dev, val); | ||
647 | return count; | ||
648 | } | ||
649 | |||
650 | static ssize_t store_emulate_tas(struct se_dev_attrib *da, | ||
651 | const char *page, size_t count) | ||
652 | { | ||
653 | bool flag; | ||
654 | int ret; | ||
655 | |||
656 | ret = strtobool(page, &flag); | ||
657 | if (ret < 0) | ||
658 | return ret; | ||
659 | |||
660 | if (da->da_dev->export_count) { | ||
661 | pr_err("dev[%p]: Unable to change SE Device TAS while" | ||
662 | " export_count is %d\n", | ||
663 | da->da_dev, da->da_dev->export_count); | ||
664 | return -EINVAL; | ||
665 | } | ||
666 | da->emulate_tas = flag; | ||
667 | pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | ||
668 | da->da_dev, flag ? "Enabled" : "Disabled"); | ||
669 | |||
670 | return count; | ||
671 | } | ||
672 | |||
673 | static ssize_t store_emulate_tpu(struct se_dev_attrib *da, | ||
674 | const char *page, size_t count) | ||
675 | { | ||
676 | bool flag; | ||
677 | int ret; | ||
678 | |||
679 | ret = strtobool(page, &flag); | ||
680 | if (ret < 0) | ||
681 | return ret; | ||
682 | |||
683 | /* | ||
684 | * We expect this value to be non-zero when generic Block Layer | ||
685 | * Discard supported is detected iblock_create_virtdevice(). | ||
686 | */ | ||
687 | if (flag && !da->max_unmap_block_desc_count) { | ||
688 | pr_err("Generic Block Discard not supported\n"); | ||
689 | return -ENOSYS; | ||
690 | } | ||
691 | |||
692 | da->emulate_tpu = flag; | ||
693 | pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | ||
694 | da->da_dev, flag); | ||
695 | return count; | ||
696 | } | ||
697 | |||
698 | static ssize_t store_emulate_tpws(struct se_dev_attrib *da, | ||
699 | const char *page, size_t count) | ||
700 | { | ||
701 | bool flag; | ||
702 | int ret; | ||
703 | |||
704 | ret = strtobool(page, &flag); | ||
705 | if (ret < 0) | ||
706 | return ret; | ||
707 | |||
708 | /* | ||
709 | * We expect this value to be non-zero when generic Block Layer | ||
710 | * Discard supported is detected iblock_create_virtdevice(). | ||
711 | */ | ||
712 | if (flag && !da->max_unmap_block_desc_count) { | ||
713 | pr_err("Generic Block Discard not supported\n"); | ||
714 | return -ENOSYS; | ||
715 | } | ||
716 | |||
717 | da->emulate_tpws = flag; | ||
718 | pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | ||
719 | da->da_dev, flag); | ||
720 | return count; | ||
721 | } | ||
722 | |||
723 | static ssize_t store_pi_prot_type(struct se_dev_attrib *da, | ||
724 | const char *page, size_t count) | ||
725 | { | ||
726 | int old_prot = da->pi_prot_type, ret; | ||
727 | struct se_device *dev = da->da_dev; | ||
728 | u32 flag; | ||
729 | |||
730 | ret = kstrtou32(page, 0, &flag); | ||
731 | if (ret < 0) | ||
732 | return ret; | ||
733 | |||
734 | if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { | ||
735 | pr_err("Illegal value %d for pi_prot_type\n", flag); | ||
736 | return -EINVAL; | ||
737 | } | ||
738 | if (flag == 2) { | ||
739 | pr_err("DIF TYPE2 protection currently not supported\n"); | ||
740 | return -ENOSYS; | ||
741 | } | ||
742 | if (da->hw_pi_prot_type) { | ||
743 | pr_warn("DIF protection enabled on underlying hardware," | ||
744 | " ignoring\n"); | ||
745 | return count; | ||
746 | } | ||
747 | if (!dev->transport->init_prot || !dev->transport->free_prot) { | ||
748 | /* 0 is only allowed value for non-supporting backends */ | ||
749 | if (flag == 0) | ||
750 | return 0; | ||
751 | |||
752 | pr_err("DIF protection not supported by backend: %s\n", | ||
753 | dev->transport->name); | ||
754 | return -ENOSYS; | ||
755 | } | ||
756 | if (!(dev->dev_flags & DF_CONFIGURED)) { | ||
757 | pr_err("DIF protection requires device to be configured\n"); | ||
758 | return -ENODEV; | ||
759 | } | ||
760 | if (dev->export_count) { | ||
761 | pr_err("dev[%p]: Unable to change SE Device PROT type while" | ||
762 | " export_count is %d\n", dev, dev->export_count); | ||
763 | return -EINVAL; | ||
764 | } | ||
765 | |||
766 | da->pi_prot_type = flag; | ||
767 | |||
768 | if (flag && !old_prot) { | ||
769 | ret = dev->transport->init_prot(dev); | ||
770 | if (ret) { | ||
771 | da->pi_prot_type = old_prot; | ||
772 | return ret; | ||
773 | } | ||
774 | |||
775 | } else if (!flag && old_prot) { | ||
776 | dev->transport->free_prot(dev); | ||
777 | } | ||
778 | |||
779 | pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); | ||
780 | return count; | ||
781 | } | ||
782 | |||
783 | static ssize_t store_pi_prot_format(struct se_dev_attrib *da, | ||
784 | const char *page, size_t count) | ||
785 | { | ||
786 | struct se_device *dev = da->da_dev; | ||
787 | bool flag; | ||
788 | int ret; | ||
789 | |||
790 | ret = strtobool(page, &flag); | ||
791 | if (ret < 0) | ||
792 | return ret; | ||
793 | |||
794 | if (!flag) | ||
795 | return count; | ||
796 | |||
797 | if (!dev->transport->format_prot) { | ||
798 | pr_err("DIF protection format not supported by backend %s\n", | ||
799 | dev->transport->name); | ||
800 | return -ENOSYS; | ||
801 | } | ||
802 | if (!(dev->dev_flags & DF_CONFIGURED)) { | ||
803 | pr_err("DIF protection format requires device to be configured\n"); | ||
804 | return -ENODEV; | ||
805 | } | ||
806 | if (dev->export_count) { | ||
807 | pr_err("dev[%p]: Unable to format SE Device PROT type while" | ||
808 | " export_count is %d\n", dev, dev->export_count); | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | |||
812 | ret = dev->transport->format_prot(dev); | ||
813 | if (ret) | ||
814 | return ret; | ||
815 | |||
816 | pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); | ||
817 | return count; | ||
818 | } | ||
819 | |||
820 | static ssize_t store_force_pr_aptpl(struct se_dev_attrib *da, | ||
821 | const char *page, size_t count) | ||
822 | { | ||
823 | bool flag; | ||
824 | int ret; | ||
825 | |||
826 | ret = strtobool(page, &flag); | ||
827 | if (ret < 0) | ||
828 | return ret; | ||
829 | if (da->da_dev->export_count) { | ||
830 | pr_err("dev[%p]: Unable to set force_pr_aptpl while" | ||
831 | " export_count is %d\n", | ||
832 | da->da_dev, da->da_dev->export_count); | ||
833 | return -EINVAL; | ||
834 | } | ||
835 | |||
836 | da->force_pr_aptpl = flag; | ||
837 | pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag); | ||
838 | return count; | ||
839 | } | ||
840 | |||
841 | static ssize_t store_emulate_rest_reord(struct se_dev_attrib *da, | ||
842 | const char *page, size_t count) | ||
843 | { | ||
844 | bool flag; | ||
845 | int ret; | ||
846 | |||
847 | ret = strtobool(page, &flag); | ||
848 | if (ret < 0) | ||
849 | return ret; | ||
850 | |||
851 | if (flag != 0) { | ||
852 | printk(KERN_ERR "dev[%p]: SE Device emulation of restricted" | ||
853 | " reordering not implemented\n", da->da_dev); | ||
854 | return -ENOSYS; | ||
855 | } | ||
856 | da->emulate_rest_reord = flag; | ||
857 | pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", | ||
858 | da->da_dev, flag); | ||
859 | return count; | ||
860 | } | ||
861 | |||
862 | /* | ||
863 | * Note, this can only be called on unexported SE Device Object. | ||
864 | */ | ||
865 | static ssize_t store_queue_depth(struct se_dev_attrib *da, | ||
866 | const char *page, size_t count) | ||
867 | { | ||
868 | struct se_device *dev = da->da_dev; | ||
869 | u32 val; | ||
870 | int ret; | ||
871 | |||
872 | ret = kstrtou32(page, 0, &val); | ||
873 | if (ret < 0) | ||
874 | return ret; | ||
875 | |||
876 | if (dev->export_count) { | ||
877 | pr_err("dev[%p]: Unable to change SE Device TCQ while" | ||
878 | " export_count is %d\n", | ||
879 | dev, dev->export_count); | ||
880 | return -EINVAL; | ||
881 | } | ||
882 | if (!val) { | ||
883 | pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev); | ||
884 | return -EINVAL; | ||
885 | } | ||
886 | |||
887 | if (val > dev->dev_attrib.queue_depth) { | ||
888 | if (val > dev->dev_attrib.hw_queue_depth) { | ||
889 | pr_err("dev[%p]: Passed queue_depth:" | ||
890 | " %u exceeds TCM/SE_Device MAX" | ||
891 | " TCQ: %u\n", dev, val, | ||
892 | dev->dev_attrib.hw_queue_depth); | ||
893 | return -EINVAL; | ||
894 | } | ||
895 | } | ||
896 | da->queue_depth = dev->queue_depth = val; | ||
897 | pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val); | ||
898 | return count; | ||
899 | } | ||
900 | |||
901 | static ssize_t store_optimal_sectors(struct se_dev_attrib *da, | ||
902 | const char *page, size_t count) | ||
903 | { | ||
904 | u32 val; | ||
905 | int ret; | ||
906 | |||
907 | ret = kstrtou32(page, 0, &val); | ||
908 | if (ret < 0) | ||
909 | return ret; | ||
910 | |||
911 | if (da->da_dev->export_count) { | ||
912 | pr_err("dev[%p]: Unable to change SE Device" | ||
913 | " optimal_sectors while export_count is %d\n", | ||
914 | da->da_dev, da->da_dev->export_count); | ||
915 | return -EINVAL; | ||
916 | } | ||
917 | if (val > da->hw_max_sectors) { | ||
918 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" | ||
919 | " greater than hw_max_sectors: %u\n", | ||
920 | da->da_dev, val, da->hw_max_sectors); | ||
921 | return -EINVAL; | ||
922 | } | ||
923 | |||
924 | da->optimal_sectors = val; | ||
925 | pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", | ||
926 | da->da_dev, val); | ||
927 | return count; | ||
928 | } | ||
929 | |||
930 | static ssize_t store_block_size(struct se_dev_attrib *da, | ||
931 | const char *page, size_t count) | ||
932 | { | ||
933 | u32 val; | ||
934 | int ret; | ||
935 | |||
936 | ret = kstrtou32(page, 0, &val); | ||
937 | if (ret < 0) | ||
938 | return ret; | ||
939 | |||
940 | if (da->da_dev->export_count) { | ||
941 | pr_err("dev[%p]: Unable to change SE Device block_size" | ||
942 | " while export_count is %d\n", | ||
943 | da->da_dev, da->da_dev->export_count); | ||
944 | return -EINVAL; | ||
945 | } | ||
946 | |||
947 | if (val != 512 && val != 1024 && val != 2048 && val != 4096) { | ||
948 | pr_err("dev[%p]: Illegal value for block_device: %u" | ||
949 | " for SE device, must be 512, 1024, 2048 or 4096\n", | ||
950 | da->da_dev, val); | ||
951 | return -EINVAL; | ||
952 | } | ||
953 | |||
954 | da->block_size = val; | ||
955 | if (da->max_bytes_per_io) | ||
956 | da->hw_max_sectors = da->max_bytes_per_io / val; | ||
957 | |||
958 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", | ||
959 | da->da_dev, val); | ||
960 | return count; | ||
961 | } | ||
962 | |||
963 | CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib); | ||
964 | #define TB_DEV_ATTR(_backend, _name, _mode) \ | ||
965 | static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ | ||
966 | __CONFIGFS_EATTR(_name, _mode, \ | ||
967 | show_##_name, \ | ||
968 | store_##_name); | ||
969 | |||
970 | #define TB_DEV_ATTR_RO(_backend, _name) \ | ||
971 | static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ | ||
972 | __CONFIGFS_EATTR_RO(_name, \ | ||
973 | show_##_name); | ||
974 | |||
975 | TB_DEV_ATTR(target_core, emulate_model_alias, S_IRUGO | S_IWUSR); | ||
976 | TB_DEV_ATTR(target_core, emulate_dpo, S_IRUGO | S_IWUSR); | ||
977 | TB_DEV_ATTR(target_core, emulate_fua_write, S_IRUGO | S_IWUSR); | ||
978 | TB_DEV_ATTR(target_core, emulate_fua_read, S_IRUGO | S_IWUSR); | ||
979 | TB_DEV_ATTR(target_core, emulate_write_cache, S_IRUGO | S_IWUSR); | ||
980 | TB_DEV_ATTR(target_core, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); | ||
981 | TB_DEV_ATTR(target_core, emulate_tas, S_IRUGO | S_IWUSR); | ||
982 | TB_DEV_ATTR(target_core, emulate_tpu, S_IRUGO | S_IWUSR); | ||
983 | TB_DEV_ATTR(target_core, emulate_tpws, S_IRUGO | S_IWUSR); | ||
984 | TB_DEV_ATTR(target_core, emulate_caw, S_IRUGO | S_IWUSR); | ||
985 | TB_DEV_ATTR(target_core, emulate_3pc, S_IRUGO | S_IWUSR); | ||
986 | TB_DEV_ATTR(target_core, pi_prot_type, S_IRUGO | S_IWUSR); | ||
987 | TB_DEV_ATTR_RO(target_core, hw_pi_prot_type); | ||
988 | TB_DEV_ATTR(target_core, pi_prot_format, S_IRUGO | S_IWUSR); | ||
989 | TB_DEV_ATTR(target_core, enforce_pr_isids, S_IRUGO | S_IWUSR); | ||
990 | TB_DEV_ATTR(target_core, is_nonrot, S_IRUGO | S_IWUSR); | ||
991 | TB_DEV_ATTR(target_core, emulate_rest_reord, S_IRUGO | S_IWUSR); | ||
992 | TB_DEV_ATTR(target_core, force_pr_aptpl, S_IRUGO | S_IWUSR) | ||
993 | TB_DEV_ATTR_RO(target_core, hw_block_size); | ||
994 | TB_DEV_ATTR(target_core, block_size, S_IRUGO | S_IWUSR) | ||
995 | TB_DEV_ATTR_RO(target_core, hw_max_sectors); | ||
996 | TB_DEV_ATTR(target_core, optimal_sectors, S_IRUGO | S_IWUSR); | ||
997 | TB_DEV_ATTR_RO(target_core, hw_queue_depth); | ||
998 | TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR); | ||
999 | TB_DEV_ATTR(target_core, max_unmap_lba_count, S_IRUGO | S_IWUSR); | ||
1000 | TB_DEV_ATTR(target_core, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); | ||
1001 | TB_DEV_ATTR(target_core, unmap_granularity, S_IRUGO | S_IWUSR); | ||
1002 | TB_DEV_ATTR(target_core, unmap_granularity_alignment, S_IRUGO | S_IWUSR); | ||
1003 | TB_DEV_ATTR(target_core, max_write_same_len, S_IRUGO | S_IWUSR); | ||
508 | 1004 | ||
509 | CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); | 1005 | CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); |
510 | CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); | 1006 | CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); |
511 | 1007 | ||
1008 | /* | ||
1009 | * dev_attrib attributes for devices using the target core SBC/SPC | ||
1010 | * interpreter. Any backend using spc_parse_cdb should be using | ||
1011 | * these. | ||
1012 | */ | ||
1013 | struct configfs_attribute *sbc_attrib_attrs[] = { | ||
1014 | &target_core_dev_attrib_emulate_model_alias.attr, | ||
1015 | &target_core_dev_attrib_emulate_dpo.attr, | ||
1016 | &target_core_dev_attrib_emulate_fua_write.attr, | ||
1017 | &target_core_dev_attrib_emulate_fua_read.attr, | ||
1018 | &target_core_dev_attrib_emulate_write_cache.attr, | ||
1019 | &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
1020 | &target_core_dev_attrib_emulate_tas.attr, | ||
1021 | &target_core_dev_attrib_emulate_tpu.attr, | ||
1022 | &target_core_dev_attrib_emulate_tpws.attr, | ||
1023 | &target_core_dev_attrib_emulate_caw.attr, | ||
1024 | &target_core_dev_attrib_emulate_3pc.attr, | ||
1025 | &target_core_dev_attrib_pi_prot_type.attr, | ||
1026 | &target_core_dev_attrib_hw_pi_prot_type.attr, | ||
1027 | &target_core_dev_attrib_pi_prot_format.attr, | ||
1028 | &target_core_dev_attrib_enforce_pr_isids.attr, | ||
1029 | &target_core_dev_attrib_is_nonrot.attr, | ||
1030 | &target_core_dev_attrib_emulate_rest_reord.attr, | ||
1031 | &target_core_dev_attrib_force_pr_aptpl.attr, | ||
1032 | &target_core_dev_attrib_hw_block_size.attr, | ||
1033 | &target_core_dev_attrib_block_size.attr, | ||
1034 | &target_core_dev_attrib_hw_max_sectors.attr, | ||
1035 | &target_core_dev_attrib_optimal_sectors.attr, | ||
1036 | &target_core_dev_attrib_hw_queue_depth.attr, | ||
1037 | &target_core_dev_attrib_queue_depth.attr, | ||
1038 | &target_core_dev_attrib_max_unmap_lba_count.attr, | ||
1039 | &target_core_dev_attrib_max_unmap_block_desc_count.attr, | ||
1040 | &target_core_dev_attrib_unmap_granularity.attr, | ||
1041 | &target_core_dev_attrib_unmap_granularity_alignment.attr, | ||
1042 | &target_core_dev_attrib_max_write_same_len.attr, | ||
1043 | NULL, | ||
1044 | }; | ||
1045 | EXPORT_SYMBOL(sbc_attrib_attrs); | ||
1046 | |||
1047 | TB_DEV_ATTR_RO(target_pt, hw_pi_prot_type); | ||
1048 | TB_DEV_ATTR_RO(target_pt, hw_block_size); | ||
1049 | TB_DEV_ATTR_RO(target_pt, hw_max_sectors); | ||
1050 | TB_DEV_ATTR_RO(target_pt, hw_queue_depth); | ||
1051 | |||
1052 | /* | ||
1053 | * Minimal dev_attrib attributes for devices passing through CDBs. | ||
1054 | * In this case we only provide a few read-only attributes for | ||
1055 | * backwards compatibility. | ||
1056 | */ | ||
1057 | struct configfs_attribute *passthrough_attrib_attrs[] = { | ||
1058 | &target_pt_dev_attrib_hw_pi_prot_type.attr, | ||
1059 | &target_pt_dev_attrib_hw_block_size.attr, | ||
1060 | &target_pt_dev_attrib_hw_max_sectors.attr, | ||
1061 | &target_pt_dev_attrib_hw_queue_depth.attr, | ||
1062 | NULL, | ||
1063 | }; | ||
1064 | EXPORT_SYMBOL(passthrough_attrib_attrs); | ||
1065 | |||
512 | static struct configfs_item_operations target_core_dev_attrib_ops = { | 1066 | static struct configfs_item_operations target_core_dev_attrib_ops = { |
513 | .show_attribute = target_core_dev_attrib_attr_show, | 1067 | .show_attribute = target_core_dev_attrib_attr_show, |
514 | .store_attribute = target_core_dev_attrib_attr_store, | 1068 | .store_attribute = target_core_dev_attrib_attr_store, |
515 | }; | 1069 | }; |
516 | 1070 | ||
517 | TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL); | 1071 | TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL); |
518 | 1072 | ||
519 | /* End functions for struct config_item_type tb_dev_attrib_cit */ | 1073 | /* End functions for struct config_item_type tb_dev_attrib_cit */ |
520 | 1074 | ||
@@ -862,7 +1416,6 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
862 | struct se_device *dev, char *page) | 1416 | struct se_device *dev, char *page) |
863 | { | 1417 | { |
864 | struct se_node_acl *se_nacl; | 1418 | struct se_node_acl *se_nacl; |
865 | struct se_lun *lun; | ||
866 | struct se_portal_group *se_tpg; | 1419 | struct se_portal_group *se_tpg; |
867 | struct t10_pr_registration *pr_reg; | 1420 | struct t10_pr_registration *pr_reg; |
868 | const struct target_core_fabric_ops *tfo; | 1421 | const struct target_core_fabric_ops *tfo; |
@@ -877,7 +1430,6 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
877 | 1430 | ||
878 | se_nacl = pr_reg->pr_reg_nacl; | 1431 | se_nacl = pr_reg->pr_reg_nacl; |
879 | se_tpg = se_nacl->se_tpg; | 1432 | se_tpg = se_nacl->se_tpg; |
880 | lun = pr_reg->pr_reg_tg_pt_lun; | ||
881 | tfo = se_tpg->se_tpg_tfo; | 1433 | tfo = se_tpg->se_tpg_tfo; |
882 | 1434 | ||
883 | len += sprintf(page+len, "SPC-3 Reservation: %s" | 1435 | len += sprintf(page+len, "SPC-3 Reservation: %s" |
@@ -885,9 +1437,9 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
885 | tfo->tpg_get_wwn(se_tpg)); | 1437 | tfo->tpg_get_wwn(se_tpg)); |
886 | len += sprintf(page+len, "SPC-3 Reservation: Relative Port" | 1438 | len += sprintf(page+len, "SPC-3 Reservation: Relative Port" |
887 | " Identifier Tag: %hu %s Portal Group Tag: %hu" | 1439 | " Identifier Tag: %hu %s Portal Group Tag: %hu" |
888 | " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, | 1440 | " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi, |
889 | tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), | 1441 | tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), |
890 | tfo->get_fabric_name(), lun->unpacked_lun); | 1442 | tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun); |
891 | 1443 | ||
892 | out_unlock: | 1444 | out_unlock: |
893 | spin_unlock(&dev->dev_reservation_lock); | 1445 | spin_unlock(&dev->dev_reservation_lock); |
@@ -1012,12 +1564,12 @@ static match_table_t tokens = { | |||
1012 | {Opt_res_type, "res_type=%d"}, | 1564 | {Opt_res_type, "res_type=%d"}, |
1013 | {Opt_res_scope, "res_scope=%d"}, | 1565 | {Opt_res_scope, "res_scope=%d"}, |
1014 | {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, | 1566 | {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, |
1015 | {Opt_mapped_lun, "mapped_lun=%d"}, | 1567 | {Opt_mapped_lun, "mapped_lun=%lld"}, |
1016 | {Opt_target_fabric, "target_fabric=%s"}, | 1568 | {Opt_target_fabric, "target_fabric=%s"}, |
1017 | {Opt_target_node, "target_node=%s"}, | 1569 | {Opt_target_node, "target_node=%s"}, |
1018 | {Opt_tpgt, "tpgt=%d"}, | 1570 | {Opt_tpgt, "tpgt=%d"}, |
1019 | {Opt_port_rtpi, "port_rtpi=%d"}, | 1571 | {Opt_port_rtpi, "port_rtpi=%d"}, |
1020 | {Opt_target_lun, "target_lun=%d"}, | 1572 | {Opt_target_lun, "target_lun=%lld"}, |
1021 | {Opt_err, NULL} | 1573 | {Opt_err, NULL} |
1022 | }; | 1574 | }; |
1023 | 1575 | ||
@@ -1032,10 +1584,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1032 | substring_t args[MAX_OPT_ARGS]; | 1584 | substring_t args[MAX_OPT_ARGS]; |
1033 | unsigned long long tmp_ll; | 1585 | unsigned long long tmp_ll; |
1034 | u64 sa_res_key = 0; | 1586 | u64 sa_res_key = 0; |
1035 | u32 mapped_lun = 0, target_lun = 0; | 1587 | u64 mapped_lun = 0, target_lun = 0; |
1036 | int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; | 1588 | int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; |
1037 | u16 port_rpti = 0, tpgt = 0; | 1589 | u16 tpgt = 0; |
1038 | u8 type = 0, scope; | 1590 | u8 type = 0; |
1039 | 1591 | ||
1040 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) | 1592 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
1041 | return 0; | 1593 | return 0; |
@@ -1115,7 +1667,6 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1115 | break; | 1667 | break; |
1116 | case Opt_res_scope: | 1668 | case Opt_res_scope: |
1117 | match_int(args, &arg); | 1669 | match_int(args, &arg); |
1118 | scope = (u8)arg; | ||
1119 | break; | 1670 | break; |
1120 | case Opt_res_all_tg_pt: | 1671 | case Opt_res_all_tg_pt: |
1121 | match_int(args, &arg); | 1672 | match_int(args, &arg); |
@@ -1123,7 +1674,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1123 | break; | 1674 | break; |
1124 | case Opt_mapped_lun: | 1675 | case Opt_mapped_lun: |
1125 | match_int(args, &arg); | 1676 | match_int(args, &arg); |
1126 | mapped_lun = (u32)arg; | 1677 | mapped_lun = (u64)arg; |
1127 | break; | 1678 | break; |
1128 | /* | 1679 | /* |
1129 | * PR APTPL Metadata for Target Port | 1680 | * PR APTPL Metadata for Target Port |
@@ -1155,11 +1706,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1155 | break; | 1706 | break; |
1156 | case Opt_port_rtpi: | 1707 | case Opt_port_rtpi: |
1157 | match_int(args, &arg); | 1708 | match_int(args, &arg); |
1158 | port_rpti = (u16)arg; | ||
1159 | break; | 1709 | break; |
1160 | case Opt_target_lun: | 1710 | case Opt_target_lun: |
1161 | match_int(args, &arg); | 1711 | match_int(args, &arg); |
1162 | target_lun = (u32)arg; | 1712 | target_lun = (u64)arg; |
1163 | break; | 1713 | break; |
1164 | default: | 1714 | default: |
1165 | break; | 1715 | break; |
@@ -1223,13 +1773,13 @@ TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs); | |||
1223 | static ssize_t target_core_show_dev_info(void *p, char *page) | 1773 | static ssize_t target_core_show_dev_info(void *p, char *page) |
1224 | { | 1774 | { |
1225 | struct se_device *dev = p; | 1775 | struct se_device *dev = p; |
1226 | struct se_subsystem_api *t = dev->transport; | ||
1227 | int bl = 0; | 1776 | int bl = 0; |
1228 | ssize_t read_bytes = 0; | 1777 | ssize_t read_bytes = 0; |
1229 | 1778 | ||
1230 | transport_dump_dev_state(dev, page, &bl); | 1779 | transport_dump_dev_state(dev, page, &bl); |
1231 | read_bytes += bl; | 1780 | read_bytes += bl; |
1232 | read_bytes += t->show_configfs_dev_params(dev, page+read_bytes); | 1781 | read_bytes += dev->transport->show_configfs_dev_params(dev, |
1782 | page+read_bytes); | ||
1233 | return read_bytes; | 1783 | return read_bytes; |
1234 | } | 1784 | } |
1235 | 1785 | ||
@@ -1247,9 +1797,8 @@ static ssize_t target_core_store_dev_control( | |||
1247 | size_t count) | 1797 | size_t count) |
1248 | { | 1798 | { |
1249 | struct se_device *dev = p; | 1799 | struct se_device *dev = p; |
1250 | struct se_subsystem_api *t = dev->transport; | ||
1251 | 1800 | ||
1252 | return t->set_configfs_dev_params(dev, page, count); | 1801 | return dev->transport->set_configfs_dev_params(dev, page, count); |
1253 | } | 1802 | } |
1254 | 1803 | ||
1255 | static struct target_core_configfs_attribute target_core_attr_dev_control = { | 1804 | static struct target_core_configfs_attribute target_core_attr_dev_control = { |
@@ -2339,21 +2888,16 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( | |||
2339 | struct t10_alua_tg_pt_gp *tg_pt_gp, | 2888 | struct t10_alua_tg_pt_gp *tg_pt_gp, |
2340 | char *page) | 2889 | char *page) |
2341 | { | 2890 | { |
2342 | struct se_port *port; | ||
2343 | struct se_portal_group *tpg; | ||
2344 | struct se_lun *lun; | 2891 | struct se_lun *lun; |
2345 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
2346 | ssize_t len = 0, cur_len; | 2892 | ssize_t len = 0, cur_len; |
2347 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; | 2893 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; |
2348 | 2894 | ||
2349 | memset(buf, 0, TG_PT_GROUP_NAME_BUF); | 2895 | memset(buf, 0, TG_PT_GROUP_NAME_BUF); |
2350 | 2896 | ||
2351 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 2897 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
2352 | list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, | 2898 | list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, |
2353 | tg_pt_gp_mem_list) { | 2899 | lun_tg_pt_gp_link) { |
2354 | port = tg_pt_gp_mem->tg_pt; | 2900 | struct se_portal_group *tpg = lun->lun_tpg; |
2355 | tpg = port->sep_tpg; | ||
2356 | lun = port->sep_lun; | ||
2357 | 2901 | ||
2358 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" | 2902 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" |
2359 | "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), | 2903 | "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), |
@@ -2526,9 +3070,9 @@ static struct config_group *target_core_make_subdev( | |||
2526 | const char *name) | 3070 | const char *name) |
2527 | { | 3071 | { |
2528 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 3072 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
2529 | struct se_subsystem_api *t; | ||
2530 | struct config_item *hba_ci = &group->cg_item; | 3073 | struct config_item *hba_ci = &group->cg_item; |
2531 | struct se_hba *hba = item_to_hba(hba_ci); | 3074 | struct se_hba *hba = item_to_hba(hba_ci); |
3075 | struct target_backend *tb = hba->backend; | ||
2532 | struct se_device *dev; | 3076 | struct se_device *dev; |
2533 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; | 3077 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; |
2534 | struct config_group *dev_stat_grp = NULL; | 3078 | struct config_group *dev_stat_grp = NULL; |
@@ -2537,10 +3081,6 @@ static struct config_group *target_core_make_subdev( | |||
2537 | ret = mutex_lock_interruptible(&hba->hba_access_mutex); | 3081 | ret = mutex_lock_interruptible(&hba->hba_access_mutex); |
2538 | if (ret) | 3082 | if (ret) |
2539 | return ERR_PTR(ret); | 3083 | return ERR_PTR(ret); |
2540 | /* | ||
2541 | * Locate the struct se_subsystem_api from parent's struct se_hba. | ||
2542 | */ | ||
2543 | t = hba->transport; | ||
2544 | 3084 | ||
2545 | dev = target_alloc_device(hba, name); | 3085 | dev = target_alloc_device(hba, name); |
2546 | if (!dev) | 3086 | if (!dev) |
@@ -2553,17 +3093,17 @@ static struct config_group *target_core_make_subdev( | |||
2553 | if (!dev_cg->default_groups) | 3093 | if (!dev_cg->default_groups) |
2554 | goto out_free_device; | 3094 | goto out_free_device; |
2555 | 3095 | ||
2556 | config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit); | 3096 | config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit); |
2557 | config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", | 3097 | config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", |
2558 | &t->tb_cits.tb_dev_attrib_cit); | 3098 | &tb->tb_dev_attrib_cit); |
2559 | config_group_init_type_name(&dev->dev_pr_group, "pr", | 3099 | config_group_init_type_name(&dev->dev_pr_group, "pr", |
2560 | &t->tb_cits.tb_dev_pr_cit); | 3100 | &tb->tb_dev_pr_cit); |
2561 | config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", | 3101 | config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", |
2562 | &t->tb_cits.tb_dev_wwn_cit); | 3102 | &tb->tb_dev_wwn_cit); |
2563 | config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, | 3103 | config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, |
2564 | "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit); | 3104 | "alua", &tb->tb_dev_alua_tg_pt_gps_cit); |
2565 | config_group_init_type_name(&dev->dev_stat_grps.stat_group, | 3105 | config_group_init_type_name(&dev->dev_stat_grps.stat_group, |
2566 | "statistics", &t->tb_cits.tb_dev_stat_cit); | 3106 | "statistics", &tb->tb_dev_stat_cit); |
2567 | 3107 | ||
2568 | dev_cg->default_groups[0] = &dev->dev_attrib.da_group; | 3108 | dev_cg->default_groups[0] = &dev->dev_attrib.da_group; |
2569 | dev_cg->default_groups[1] = &dev->dev_pr_group; | 3109 | dev_cg->default_groups[1] = &dev->dev_pr_group; |
@@ -2693,8 +3233,8 @@ static ssize_t target_core_hba_show_attr_hba_info( | |||
2693 | char *page) | 3233 | char *page) |
2694 | { | 3234 | { |
2695 | return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", | 3235 | return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", |
2696 | hba->hba_id, hba->transport->name, | 3236 | hba->hba_id, hba->backend->ops->name, |
2697 | TARGET_CORE_CONFIGFS_VERSION); | 3237 | TARGET_CORE_VERSION); |
2698 | } | 3238 | } |
2699 | 3239 | ||
2700 | SE_HBA_ATTR_RO(hba_info); | 3240 | SE_HBA_ATTR_RO(hba_info); |
@@ -2713,11 +3253,10 @@ static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba, | |||
2713 | static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, | 3253 | static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, |
2714 | const char *page, size_t count) | 3254 | const char *page, size_t count) |
2715 | { | 3255 | { |
2716 | struct se_subsystem_api *transport = hba->transport; | ||
2717 | unsigned long mode_flag; | 3256 | unsigned long mode_flag; |
2718 | int ret; | 3257 | int ret; |
2719 | 3258 | ||
2720 | if (transport->pmode_enable_hba == NULL) | 3259 | if (hba->backend->ops->pmode_enable_hba == NULL) |
2721 | return -EINVAL; | 3260 | return -EINVAL; |
2722 | 3261 | ||
2723 | ret = kstrtoul(page, 0, &mode_flag); | 3262 | ret = kstrtoul(page, 0, &mode_flag); |
@@ -2731,7 +3270,7 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, | |||
2731 | return -EINVAL; | 3270 | return -EINVAL; |
2732 | } | 3271 | } |
2733 | 3272 | ||
2734 | ret = transport->pmode_enable_hba(hba, mode_flag); | 3273 | ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag); |
2735 | if (ret < 0) | 3274 | if (ret < 0) |
2736 | return -EINVAL; | 3275 | return -EINVAL; |
2737 | if (ret > 0) | 3276 | if (ret > 0) |
@@ -2857,16 +3396,15 @@ static struct config_item_type target_core_cit = { | |||
2857 | 3396 | ||
2858 | /* Stop functions for struct config_item_type target_core_hba_cit */ | 3397 | /* Stop functions for struct config_item_type target_core_hba_cit */ |
2859 | 3398 | ||
2860 | void target_core_setup_sub_cits(struct se_subsystem_api *sa) | 3399 | void target_setup_backend_cits(struct target_backend *tb) |
2861 | { | 3400 | { |
2862 | target_core_setup_dev_cit(sa); | 3401 | target_core_setup_dev_cit(tb); |
2863 | target_core_setup_dev_attrib_cit(sa); | 3402 | target_core_setup_dev_attrib_cit(tb); |
2864 | target_core_setup_dev_pr_cit(sa); | 3403 | target_core_setup_dev_pr_cit(tb); |
2865 | target_core_setup_dev_wwn_cit(sa); | 3404 | target_core_setup_dev_wwn_cit(tb); |
2866 | target_core_setup_dev_alua_tg_pt_gps_cit(sa); | 3405 | target_core_setup_dev_alua_tg_pt_gps_cit(tb); |
2867 | target_core_setup_dev_stat_cit(sa); | 3406 | target_core_setup_dev_stat_cit(tb); |
2868 | } | 3407 | } |
2869 | EXPORT_SYMBOL(target_core_setup_sub_cits); | ||
2870 | 3408 | ||
2871 | static int __init target_core_init_configfs(void) | 3409 | static int __init target_core_init_configfs(void) |
2872 | { | 3410 | { |
@@ -2968,7 +3506,7 @@ static int __init target_core_init_configfs(void) | |||
2968 | goto out_global; | 3506 | goto out_global; |
2969 | } | 3507 | } |
2970 | pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" | 3508 | pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" |
2971 | " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" | 3509 | " Infrastructure: "TARGET_CORE_VERSION" on %s/%s" |
2972 | " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); | 3510 | " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); |
2973 | /* | 3511 | /* |
2974 | * Register built-in RAMDISK subsystem logic for virtual LUN 0 | 3512 | * Register built-in RAMDISK subsystem logic for virtual LUN 0 |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 417f88b498c7..09e682b1c549 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -56,40 +56,37 @@ static struct se_hba *lun0_hba; | |||
56 | struct se_device *g_lun0_dev; | 56 | struct se_device *g_lun0_dev; |
57 | 57 | ||
58 | sense_reason_t | 58 | sense_reason_t |
59 | transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | 59 | transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun) |
60 | { | 60 | { |
61 | struct se_lun *se_lun = NULL; | 61 | struct se_lun *se_lun = NULL; |
62 | struct se_session *se_sess = se_cmd->se_sess; | 62 | struct se_session *se_sess = se_cmd->se_sess; |
63 | struct se_device *dev; | 63 | struct se_node_acl *nacl = se_sess->se_node_acl; |
64 | unsigned long flags; | 64 | struct se_dev_entry *deve; |
65 | |||
66 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) | ||
67 | return TCM_NON_EXISTENT_LUN; | ||
68 | |||
69 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); | ||
70 | se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; | ||
71 | if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | ||
72 | struct se_dev_entry *deve = se_cmd->se_deve; | ||
73 | 65 | ||
74 | deve->total_cmds++; | 66 | rcu_read_lock(); |
67 | deve = target_nacl_find_deve(nacl, unpacked_lun); | ||
68 | if (deve) { | ||
69 | atomic_long_inc(&deve->total_cmds); | ||
75 | 70 | ||
76 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && | 71 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && |
77 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { | 72 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { |
78 | pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | 73 | pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" |
79 | " Access for 0x%08x\n", | 74 | " Access for 0x%08llx\n", |
80 | se_cmd->se_tfo->get_fabric_name(), | 75 | se_cmd->se_tfo->get_fabric_name(), |
81 | unpacked_lun); | 76 | unpacked_lun); |
82 | spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); | 77 | rcu_read_unlock(); |
83 | return TCM_WRITE_PROTECTED; | 78 | return TCM_WRITE_PROTECTED; |
84 | } | 79 | } |
85 | 80 | ||
86 | if (se_cmd->data_direction == DMA_TO_DEVICE) | 81 | if (se_cmd->data_direction == DMA_TO_DEVICE) |
87 | deve->write_bytes += se_cmd->data_length; | 82 | atomic_long_add(se_cmd->data_length, |
83 | &deve->write_bytes); | ||
88 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | 84 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) |
89 | deve->read_bytes += se_cmd->data_length; | 85 | atomic_long_add(se_cmd->data_length, |
86 | &deve->read_bytes); | ||
90 | 87 | ||
91 | se_lun = deve->se_lun; | 88 | se_lun = rcu_dereference(deve->se_lun); |
92 | se_cmd->se_lun = deve->se_lun; | 89 | se_cmd->se_lun = rcu_dereference(deve->se_lun); |
93 | se_cmd->pr_res_key = deve->pr_res_key; | 90 | se_cmd->pr_res_key = deve->pr_res_key; |
94 | se_cmd->orig_fe_lun = unpacked_lun; | 91 | se_cmd->orig_fe_lun = unpacked_lun; |
95 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 92 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
@@ -97,7 +94,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
97 | percpu_ref_get(&se_lun->lun_ref); | 94 | percpu_ref_get(&se_lun->lun_ref); |
98 | se_cmd->lun_ref_active = true; | 95 | se_cmd->lun_ref_active = true; |
99 | } | 96 | } |
100 | spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); | 97 | rcu_read_unlock(); |
101 | 98 | ||
102 | if (!se_lun) { | 99 | if (!se_lun) { |
103 | /* | 100 | /* |
@@ -107,7 +104,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
107 | */ | 104 | */ |
108 | if (unpacked_lun != 0) { | 105 | if (unpacked_lun != 0) { |
109 | pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 106 | pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
110 | " Access for 0x%08x\n", | 107 | " Access for 0x%08llx\n", |
111 | se_cmd->se_tfo->get_fabric_name(), | 108 | se_cmd->se_tfo->get_fabric_name(), |
112 | unpacked_lun); | 109 | unpacked_lun); |
113 | return TCM_NON_EXISTENT_LUN; | 110 | return TCM_NON_EXISTENT_LUN; |
@@ -119,64 +116,66 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
119 | (se_cmd->data_direction != DMA_NONE)) | 116 | (se_cmd->data_direction != DMA_NONE)) |
120 | return TCM_WRITE_PROTECTED; | 117 | return TCM_WRITE_PROTECTED; |
121 | 118 | ||
122 | se_lun = &se_sess->se_tpg->tpg_virt_lun0; | 119 | se_lun = se_sess->se_tpg->tpg_virt_lun0; |
123 | se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | 120 | se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; |
124 | se_cmd->orig_fe_lun = 0; | 121 | se_cmd->orig_fe_lun = 0; |
125 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 122 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
126 | 123 | ||
127 | percpu_ref_get(&se_lun->lun_ref); | 124 | percpu_ref_get(&se_lun->lun_ref); |
128 | se_cmd->lun_ref_active = true; | 125 | se_cmd->lun_ref_active = true; |
129 | } | 126 | } |
127 | /* | ||
128 | * RCU reference protected by percpu se_lun->lun_ref taken above that | ||
129 | * must drop to zero (including initial reference) before this se_lun | ||
130 | * pointer can be kfree_rcu() by the final se_lun->lun_group put via | ||
131 | * target_core_fabric_configfs.c:target_fabric_port_release | ||
132 | */ | ||
133 | se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); | ||
134 | atomic_long_inc(&se_cmd->se_dev->num_cmds); | ||
130 | 135 | ||
131 | /* Directly associate cmd with se_dev */ | ||
132 | se_cmd->se_dev = se_lun->lun_se_dev; | ||
133 | |||
134 | dev = se_lun->lun_se_dev; | ||
135 | atomic_long_inc(&dev->num_cmds); | ||
136 | if (se_cmd->data_direction == DMA_TO_DEVICE) | 136 | if (se_cmd->data_direction == DMA_TO_DEVICE) |
137 | atomic_long_add(se_cmd->data_length, &dev->write_bytes); | 137 | atomic_long_add(se_cmd->data_length, |
138 | &se_cmd->se_dev->write_bytes); | ||
138 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | 139 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) |
139 | atomic_long_add(se_cmd->data_length, &dev->read_bytes); | 140 | atomic_long_add(se_cmd->data_length, |
141 | &se_cmd->se_dev->read_bytes); | ||
140 | 142 | ||
141 | return 0; | 143 | return 0; |
142 | } | 144 | } |
143 | EXPORT_SYMBOL(transport_lookup_cmd_lun); | 145 | EXPORT_SYMBOL(transport_lookup_cmd_lun); |
144 | 146 | ||
145 | int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | 147 | int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) |
146 | { | 148 | { |
147 | struct se_dev_entry *deve; | 149 | struct se_dev_entry *deve; |
148 | struct se_lun *se_lun = NULL; | 150 | struct se_lun *se_lun = NULL; |
149 | struct se_session *se_sess = se_cmd->se_sess; | 151 | struct se_session *se_sess = se_cmd->se_sess; |
152 | struct se_node_acl *nacl = se_sess->se_node_acl; | ||
150 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | 153 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; |
151 | unsigned long flags; | 154 | unsigned long flags; |
152 | 155 | ||
153 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) | 156 | rcu_read_lock(); |
154 | return -ENODEV; | 157 | deve = target_nacl_find_deve(nacl, unpacked_lun); |
155 | 158 | if (deve) { | |
156 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); | 159 | se_tmr->tmr_lun = rcu_dereference(deve->se_lun); |
157 | se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; | 160 | se_cmd->se_lun = rcu_dereference(deve->se_lun); |
158 | deve = se_cmd->se_deve; | 161 | se_lun = rcu_dereference(deve->se_lun); |
159 | |||
160 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | ||
161 | se_tmr->tmr_lun = deve->se_lun; | ||
162 | se_cmd->se_lun = deve->se_lun; | ||
163 | se_lun = deve->se_lun; | ||
164 | se_cmd->pr_res_key = deve->pr_res_key; | 162 | se_cmd->pr_res_key = deve->pr_res_key; |
165 | se_cmd->orig_fe_lun = unpacked_lun; | 163 | se_cmd->orig_fe_lun = unpacked_lun; |
166 | } | 164 | } |
167 | spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); | 165 | rcu_read_unlock(); |
168 | 166 | ||
169 | if (!se_lun) { | 167 | if (!se_lun) { |
170 | pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 168 | pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
171 | " Access for 0x%08x\n", | 169 | " Access for 0x%08llx\n", |
172 | se_cmd->se_tfo->get_fabric_name(), | 170 | se_cmd->se_tfo->get_fabric_name(), |
173 | unpacked_lun); | 171 | unpacked_lun); |
174 | return -ENODEV; | 172 | return -ENODEV; |
175 | } | 173 | } |
176 | 174 | /* | |
177 | /* Directly associate cmd with se_dev */ | 175 | * XXX: Add percpu se_lun->lun_ref reference count for TMR |
178 | se_cmd->se_dev = se_lun->lun_se_dev; | 176 | */ |
179 | se_tmr->tmr_dev = se_lun->lun_se_dev; | 177 | se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); |
178 | se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); | ||
180 | 179 | ||
181 | spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); | 180 | spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); |
182 | list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); | 181 | list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); |
@@ -186,9 +185,24 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | |||
186 | } | 185 | } |
187 | EXPORT_SYMBOL(transport_lookup_tmr_lun); | 186 | EXPORT_SYMBOL(transport_lookup_tmr_lun); |
188 | 187 | ||
188 | bool target_lun_is_rdonly(struct se_cmd *cmd) | ||
189 | { | ||
190 | struct se_session *se_sess = cmd->se_sess; | ||
191 | struct se_dev_entry *deve; | ||
192 | bool ret; | ||
193 | |||
194 | rcu_read_lock(); | ||
195 | deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun); | ||
196 | ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY); | ||
197 | rcu_read_unlock(); | ||
198 | |||
199 | return ret; | ||
200 | } | ||
201 | EXPORT_SYMBOL(target_lun_is_rdonly); | ||
202 | |||
189 | /* | 203 | /* |
190 | * This function is called from core_scsi3_emulate_pro_register_and_move() | 204 | * This function is called from core_scsi3_emulate_pro_register_and_move() |
191 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count | 205 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref |
192 | * when a matching rtpi is found. | 206 | * when a matching rtpi is found. |
193 | */ | 207 | */ |
194 | struct se_dev_entry *core_get_se_deve_from_rtpi( | 208 | struct se_dev_entry *core_get_se_deve_from_rtpi( |
@@ -197,231 +211,238 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( | |||
197 | { | 211 | { |
198 | struct se_dev_entry *deve; | 212 | struct se_dev_entry *deve; |
199 | struct se_lun *lun; | 213 | struct se_lun *lun; |
200 | struct se_port *port; | ||
201 | struct se_portal_group *tpg = nacl->se_tpg; | 214 | struct se_portal_group *tpg = nacl->se_tpg; |
202 | u32 i; | ||
203 | |||
204 | spin_lock_irq(&nacl->device_list_lock); | ||
205 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
206 | deve = nacl->device_list[i]; | ||
207 | 215 | ||
208 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 216 | rcu_read_lock(); |
209 | continue; | 217 | hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { |
210 | 218 | lun = rcu_dereference(deve->se_lun); | |
211 | lun = deve->se_lun; | ||
212 | if (!lun) { | 219 | if (!lun) { |
213 | pr_err("%s device entries device pointer is" | 220 | pr_err("%s device entries device pointer is" |
214 | " NULL, but Initiator has access.\n", | 221 | " NULL, but Initiator has access.\n", |
215 | tpg->se_tpg_tfo->get_fabric_name()); | 222 | tpg->se_tpg_tfo->get_fabric_name()); |
216 | continue; | 223 | continue; |
217 | } | 224 | } |
218 | port = lun->lun_sep; | 225 | if (lun->lun_rtpi != rtpi) |
219 | if (!port) { | ||
220 | pr_err("%s device entries device pointer is" | ||
221 | " NULL, but Initiator has access.\n", | ||
222 | tpg->se_tpg_tfo->get_fabric_name()); | ||
223 | continue; | ||
224 | } | ||
225 | if (port->sep_rtpi != rtpi) | ||
226 | continue; | 226 | continue; |
227 | 227 | ||
228 | atomic_inc_mb(&deve->pr_ref_count); | 228 | kref_get(&deve->pr_kref); |
229 | spin_unlock_irq(&nacl->device_list_lock); | 229 | rcu_read_unlock(); |
230 | 230 | ||
231 | return deve; | 231 | return deve; |
232 | } | 232 | } |
233 | spin_unlock_irq(&nacl->device_list_lock); | 233 | rcu_read_unlock(); |
234 | 234 | ||
235 | return NULL; | 235 | return NULL; |
236 | } | 236 | } |
237 | 237 | ||
238 | int core_free_device_list_for_node( | 238 | void core_free_device_list_for_node( |
239 | struct se_node_acl *nacl, | 239 | struct se_node_acl *nacl, |
240 | struct se_portal_group *tpg) | 240 | struct se_portal_group *tpg) |
241 | { | 241 | { |
242 | struct se_dev_entry *deve; | 242 | struct se_dev_entry *deve; |
243 | struct se_lun *lun; | ||
244 | u32 i; | ||
245 | |||
246 | if (!nacl->device_list) | ||
247 | return 0; | ||
248 | |||
249 | spin_lock_irq(&nacl->device_list_lock); | ||
250 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
251 | deve = nacl->device_list[i]; | ||
252 | |||
253 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
254 | continue; | ||
255 | |||
256 | if (!deve->se_lun) { | ||
257 | pr_err("%s device entries device pointer is" | ||
258 | " NULL, but Initiator has access.\n", | ||
259 | tpg->se_tpg_tfo->get_fabric_name()); | ||
260 | continue; | ||
261 | } | ||
262 | lun = deve->se_lun; | ||
263 | 243 | ||
264 | spin_unlock_irq(&nacl->device_list_lock); | 244 | mutex_lock(&nacl->lun_entry_mutex); |
265 | core_disable_device_list_for_node(lun, NULL, deve->mapped_lun, | 245 | hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { |
266 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); | 246 | struct se_lun *lun = rcu_dereference_check(deve->se_lun, |
267 | spin_lock_irq(&nacl->device_list_lock); | 247 | lockdep_is_held(&nacl->lun_entry_mutex)); |
248 | core_disable_device_list_for_node(lun, deve, nacl, tpg); | ||
268 | } | 249 | } |
269 | spin_unlock_irq(&nacl->device_list_lock); | 250 | mutex_unlock(&nacl->lun_entry_mutex); |
270 | |||
271 | array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG); | ||
272 | nacl->device_list = NULL; | ||
273 | |||
274 | return 0; | ||
275 | } | 251 | } |
276 | 252 | ||
277 | void core_update_device_list_access( | 253 | void core_update_device_list_access( |
278 | u32 mapped_lun, | 254 | u64 mapped_lun, |
279 | u32 lun_access, | 255 | u32 lun_access, |
280 | struct se_node_acl *nacl) | 256 | struct se_node_acl *nacl) |
281 | { | 257 | { |
282 | struct se_dev_entry *deve; | 258 | struct se_dev_entry *deve; |
283 | 259 | ||
284 | spin_lock_irq(&nacl->device_list_lock); | 260 | mutex_lock(&nacl->lun_entry_mutex); |
285 | deve = nacl->device_list[mapped_lun]; | 261 | deve = target_nacl_find_deve(nacl, mapped_lun); |
286 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 262 | if (deve) { |
287 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 263 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
288 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 264 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
289 | } else { | 265 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; |
290 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | 266 | } else { |
291 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 267 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; |
268 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
269 | } | ||
292 | } | 270 | } |
293 | spin_unlock_irq(&nacl->device_list_lock); | 271 | mutex_unlock(&nacl->lun_entry_mutex); |
294 | } | 272 | } |
295 | 273 | ||
296 | /* core_enable_device_list_for_node(): | 274 | /* |
297 | * | 275 | * Called with rcu_read_lock or nacl->device_list_lock held. |
298 | * | ||
299 | */ | 276 | */ |
277 | struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun) | ||
278 | { | ||
279 | struct se_dev_entry *deve; | ||
280 | |||
281 | hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) | ||
282 | if (deve->mapped_lun == mapped_lun) | ||
283 | return deve; | ||
284 | |||
285 | return NULL; | ||
286 | } | ||
287 | EXPORT_SYMBOL(target_nacl_find_deve); | ||
288 | |||
289 | void target_pr_kref_release(struct kref *kref) | ||
290 | { | ||
291 | struct se_dev_entry *deve = container_of(kref, struct se_dev_entry, | ||
292 | pr_kref); | ||
293 | complete(&deve->pr_comp); | ||
294 | } | ||
295 | |||
296 | static void | ||
297 | target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new, | ||
298 | bool skip_new) | ||
299 | { | ||
300 | struct se_dev_entry *tmp; | ||
301 | |||
302 | rcu_read_lock(); | ||
303 | hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) { | ||
304 | if (skip_new && tmp == new) | ||
305 | continue; | ||
306 | core_scsi3_ua_allocate(tmp, 0x3F, | ||
307 | ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED); | ||
308 | } | ||
309 | rcu_read_unlock(); | ||
310 | } | ||
311 | |||
300 | int core_enable_device_list_for_node( | 312 | int core_enable_device_list_for_node( |
301 | struct se_lun *lun, | 313 | struct se_lun *lun, |
302 | struct se_lun_acl *lun_acl, | 314 | struct se_lun_acl *lun_acl, |
303 | u32 mapped_lun, | 315 | u64 mapped_lun, |
304 | u32 lun_access, | 316 | u32 lun_access, |
305 | struct se_node_acl *nacl, | 317 | struct se_node_acl *nacl, |
306 | struct se_portal_group *tpg) | 318 | struct se_portal_group *tpg) |
307 | { | 319 | { |
308 | struct se_port *port = lun->lun_sep; | 320 | struct se_dev_entry *orig, *new; |
309 | struct se_dev_entry *deve; | 321 | |
310 | 322 | new = kzalloc(sizeof(*new), GFP_KERNEL); | |
311 | spin_lock_irq(&nacl->device_list_lock); | 323 | if (!new) { |
312 | 324 | pr_err("Unable to allocate se_dev_entry memory\n"); | |
313 | deve = nacl->device_list[mapped_lun]; | 325 | return -ENOMEM; |
314 | 326 | } | |
315 | /* | 327 | |
316 | * Check if the call is handling demo mode -> explicit LUN ACL | 328 | atomic_set(&new->ua_count, 0); |
317 | * transition. This transition must be for the same struct se_lun | 329 | spin_lock_init(&new->ua_lock); |
318 | * + mapped_lun that was setup in demo mode.. | 330 | INIT_LIST_HEAD(&new->ua_list); |
319 | */ | 331 | INIT_LIST_HEAD(&new->lun_link); |
320 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 332 | |
321 | if (deve->se_lun_acl != NULL) { | 333 | new->mapped_lun = mapped_lun; |
322 | pr_err("struct se_dev_entry->se_lun_acl" | 334 | kref_init(&new->pr_kref); |
323 | " already set for demo mode -> explicit" | 335 | init_completion(&new->pr_comp); |
324 | " LUN ACL transition\n"); | 336 | |
325 | spin_unlock_irq(&nacl->device_list_lock); | 337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) |
338 | new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | ||
339 | else | ||
340 | new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
341 | |||
342 | new->creation_time = get_jiffies_64(); | ||
343 | new->attach_count++; | ||
344 | |||
345 | mutex_lock(&nacl->lun_entry_mutex); | ||
346 | orig = target_nacl_find_deve(nacl, mapped_lun); | ||
347 | if (orig && orig->se_lun) { | ||
348 | struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun, | ||
349 | lockdep_is_held(&nacl->lun_entry_mutex)); | ||
350 | |||
351 | if (orig_lun != lun) { | ||
352 | pr_err("Existing orig->se_lun doesn't match new lun" | ||
353 | " for dynamic -> explicit NodeACL conversion:" | ||
354 | " %s\n", nacl->initiatorname); | ||
355 | mutex_unlock(&nacl->lun_entry_mutex); | ||
356 | kfree(new); | ||
326 | return -EINVAL; | 357 | return -EINVAL; |
327 | } | 358 | } |
328 | if (deve->se_lun != lun) { | 359 | BUG_ON(orig->se_lun_acl != NULL); |
329 | pr_err("struct se_dev_entry->se_lun does" | ||
330 | " match passed struct se_lun for demo mode" | ||
331 | " -> explicit LUN ACL transition\n"); | ||
332 | spin_unlock_irq(&nacl->device_list_lock); | ||
333 | return -EINVAL; | ||
334 | } | ||
335 | deve->se_lun_acl = lun_acl; | ||
336 | 360 | ||
337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 361 | rcu_assign_pointer(new->se_lun, lun); |
338 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 362 | rcu_assign_pointer(new->se_lun_acl, lun_acl); |
339 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 363 | hlist_del_rcu(&orig->link); |
340 | } else { | 364 | hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); |
341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | 365 | mutex_unlock(&nacl->lun_entry_mutex); |
342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
343 | } | ||
344 | 366 | ||
345 | spin_unlock_irq(&nacl->device_list_lock); | 367 | spin_lock(&lun->lun_deve_lock); |
346 | return 0; | 368 | list_del(&orig->lun_link); |
347 | } | 369 | list_add_tail(&new->lun_link, &lun->lun_deve_list); |
370 | spin_unlock(&lun->lun_deve_lock); | ||
371 | |||
372 | kref_put(&orig->pr_kref, target_pr_kref_release); | ||
373 | wait_for_completion(&orig->pr_comp); | ||
348 | 374 | ||
349 | deve->se_lun = lun; | 375 | target_luns_data_has_changed(nacl, new, true); |
350 | deve->se_lun_acl = lun_acl; | 376 | kfree_rcu(orig, rcu_head); |
351 | deve->mapped_lun = mapped_lun; | 377 | return 0; |
352 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | ||
353 | |||
354 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | ||
355 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | ||
356 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | ||
357 | } else { | ||
358 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | ||
359 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
360 | } | 378 | } |
361 | 379 | ||
362 | deve->creation_time = get_jiffies_64(); | 380 | rcu_assign_pointer(new->se_lun, lun); |
363 | deve->attach_count++; | 381 | rcu_assign_pointer(new->se_lun_acl, lun_acl); |
364 | spin_unlock_irq(&nacl->device_list_lock); | 382 | hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); |
383 | mutex_unlock(&nacl->lun_entry_mutex); | ||
365 | 384 | ||
366 | spin_lock_bh(&port->sep_alua_lock); | 385 | spin_lock(&lun->lun_deve_lock); |
367 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | 386 | list_add_tail(&new->lun_link, &lun->lun_deve_list); |
368 | spin_unlock_bh(&port->sep_alua_lock); | 387 | spin_unlock(&lun->lun_deve_lock); |
369 | 388 | ||
389 | target_luns_data_has_changed(nacl, new, true); | ||
370 | return 0; | 390 | return 0; |
371 | } | 391 | } |
372 | 392 | ||
373 | /* core_disable_device_list_for_node(): | 393 | /* |
374 | * | 394 | * Called with se_node_acl->lun_entry_mutex held. |
375 | * | ||
376 | */ | 395 | */ |
377 | int core_disable_device_list_for_node( | 396 | void core_disable_device_list_for_node( |
378 | struct se_lun *lun, | 397 | struct se_lun *lun, |
379 | struct se_lun_acl *lun_acl, | 398 | struct se_dev_entry *orig, |
380 | u32 mapped_lun, | ||
381 | u32 lun_access, | ||
382 | struct se_node_acl *nacl, | 399 | struct se_node_acl *nacl, |
383 | struct se_portal_group *tpg) | 400 | struct se_portal_group *tpg) |
384 | { | 401 | { |
385 | struct se_port *port = lun->lun_sep; | 402 | /* |
386 | struct se_dev_entry *deve = nacl->device_list[mapped_lun]; | 403 | * rcu_dereference_raw protected by se_lun->lun_group symlink |
387 | 404 | * reference to se_device->dev_group. | |
405 | */ | ||
406 | struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); | ||
388 | /* | 407 | /* |
389 | * If the MappedLUN entry is being disabled, the entry in | 408 | * If the MappedLUN entry is being disabled, the entry in |
390 | * port->sep_alua_list must be removed now before clearing the | 409 | * lun->lun_deve_list must be removed now before clearing the |
391 | * struct se_dev_entry pointers below as logic in | 410 | * struct se_dev_entry pointers below as logic in |
392 | * core_alua_do_transition_tg_pt() depends on these being present. | 411 | * core_alua_do_transition_tg_pt() depends on these being present. |
393 | * | 412 | * |
394 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | 413 | * deve->se_lun_acl will be NULL for demo-mode created LUNs |
395 | * that have not been explicitly converted to MappedLUNs -> | 414 | * that have not been explicitly converted to MappedLUNs -> |
396 | * struct se_lun_acl, but we remove deve->alua_port_list from | 415 | * struct se_lun_acl, but we remove deve->lun_link from |
397 | * port->sep_alua_list. This also means that active UAs and | 416 | * lun->lun_deve_list. This also means that active UAs and |
398 | * NodeACL context specific PR metadata for demo-mode | 417 | * NodeACL context specific PR metadata for demo-mode |
399 | * MappedLUN *deve will be released below.. | 418 | * MappedLUN *deve will be released below.. |
400 | */ | 419 | */ |
401 | spin_lock_bh(&port->sep_alua_lock); | 420 | spin_lock(&lun->lun_deve_lock); |
402 | list_del(&deve->alua_port_list); | 421 | list_del(&orig->lun_link); |
403 | spin_unlock_bh(&port->sep_alua_lock); | 422 | spin_unlock(&lun->lun_deve_lock); |
404 | /* | 423 | /* |
405 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE | 424 | * Disable struct se_dev_entry LUN ACL mapping |
406 | * PR operation to complete. | ||
407 | */ | 425 | */ |
408 | while (atomic_read(&deve->pr_ref_count) != 0) | 426 | core_scsi3_ua_release_all(orig); |
409 | cpu_relax(); | 427 | |
410 | 428 | hlist_del_rcu(&orig->link); | |
411 | spin_lock_irq(&nacl->device_list_lock); | 429 | clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); |
430 | rcu_assign_pointer(orig->se_lun, NULL); | ||
431 | rcu_assign_pointer(orig->se_lun_acl, NULL); | ||
432 | orig->lun_flags = 0; | ||
433 | orig->creation_time = 0; | ||
434 | orig->attach_count--; | ||
412 | /* | 435 | /* |
413 | * Disable struct se_dev_entry LUN ACL mapping | 436 | * Before firing off RCU callback, wait for any in process SPEC_I_PT=1 |
437 | * or REGISTER_AND_MOVE PR operation to complete. | ||
414 | */ | 438 | */ |
415 | core_scsi3_ua_release_all(deve); | 439 | kref_put(&orig->pr_kref, target_pr_kref_release); |
416 | deve->se_lun = NULL; | 440 | wait_for_completion(&orig->pr_comp); |
417 | deve->se_lun_acl = NULL; | 441 | |
418 | deve->lun_flags = 0; | 442 | kfree_rcu(orig, rcu_head); |
419 | deve->creation_time = 0; | 443 | |
420 | deve->attach_count--; | 444 | core_scsi3_free_pr_reg_from_nacl(dev, nacl); |
421 | spin_unlock_irq(&nacl->device_list_lock); | 445 | target_luns_data_has_changed(nacl, NULL, false); |
422 | |||
423 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); | ||
424 | return 0; | ||
425 | } | 446 | } |
426 | 447 | ||
427 | /* core_clear_lun_from_tpg(): | 448 | /* core_clear_lun_from_tpg(): |
@@ -432,53 +453,35 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
432 | { | 453 | { |
433 | struct se_node_acl *nacl; | 454 | struct se_node_acl *nacl; |
434 | struct se_dev_entry *deve; | 455 | struct se_dev_entry *deve; |
435 | u32 i; | ||
436 | 456 | ||
437 | spin_lock_irq(&tpg->acl_node_lock); | 457 | mutex_lock(&tpg->acl_node_mutex); |
438 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | 458 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { |
439 | spin_unlock_irq(&tpg->acl_node_lock); | ||
440 | 459 | ||
441 | spin_lock_irq(&nacl->device_list_lock); | 460 | mutex_lock(&nacl->lun_entry_mutex); |
442 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 461 | hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { |
443 | deve = nacl->device_list[i]; | 462 | struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun, |
444 | if (lun != deve->se_lun) | 463 | lockdep_is_held(&nacl->lun_entry_mutex)); |
445 | continue; | ||
446 | spin_unlock_irq(&nacl->device_list_lock); | ||
447 | 464 | ||
448 | core_disable_device_list_for_node(lun, NULL, | 465 | if (lun != tmp_lun) |
449 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, | 466 | continue; |
450 | nacl, tpg); | ||
451 | 467 | ||
452 | spin_lock_irq(&nacl->device_list_lock); | 468 | core_disable_device_list_for_node(lun, deve, nacl, tpg); |
453 | } | 469 | } |
454 | spin_unlock_irq(&nacl->device_list_lock); | 470 | mutex_unlock(&nacl->lun_entry_mutex); |
455 | |||
456 | spin_lock_irq(&tpg->acl_node_lock); | ||
457 | } | 471 | } |
458 | spin_unlock_irq(&tpg->acl_node_lock); | 472 | mutex_unlock(&tpg->acl_node_mutex); |
459 | } | 473 | } |
460 | 474 | ||
461 | static struct se_port *core_alloc_port(struct se_device *dev) | 475 | int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev) |
462 | { | 476 | { |
463 | struct se_port *port, *port_tmp; | 477 | struct se_lun *tmp; |
464 | |||
465 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | ||
466 | if (!port) { | ||
467 | pr_err("Unable to allocate struct se_port\n"); | ||
468 | return ERR_PTR(-ENOMEM); | ||
469 | } | ||
470 | INIT_LIST_HEAD(&port->sep_alua_list); | ||
471 | INIT_LIST_HEAD(&port->sep_list); | ||
472 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | ||
473 | spin_lock_init(&port->sep_alua_lock); | ||
474 | mutex_init(&port->sep_tg_pt_md_mutex); | ||
475 | 478 | ||
476 | spin_lock(&dev->se_port_lock); | 479 | spin_lock(&dev->se_port_lock); |
477 | if (dev->dev_port_count == 0x0000ffff) { | 480 | if (dev->export_count == 0x0000ffff) { |
478 | pr_warn("Reached dev->dev_port_count ==" | 481 | pr_warn("Reached dev->dev_port_count ==" |
479 | " 0x0000ffff\n"); | 482 | " 0x0000ffff\n"); |
480 | spin_unlock(&dev->se_port_lock); | 483 | spin_unlock(&dev->se_port_lock); |
481 | return ERR_PTR(-ENOSPC); | 484 | return -ENOSPC; |
482 | } | 485 | } |
483 | again: | 486 | again: |
484 | /* | 487 | /* |
@@ -493,133 +496,23 @@ again: | |||
493 | * 2h Relative port 2, historically known as port B | 496 | * 2h Relative port 2, historically known as port B |
494 | * 3h to FFFFh Relative port 3 through 65 535 | 497 | * 3h to FFFFh Relative port 3 through 65 535 |
495 | */ | 498 | */ |
496 | port->sep_rtpi = dev->dev_rpti_counter++; | 499 | lun->lun_rtpi = dev->dev_rpti_counter++; |
497 | if (!port->sep_rtpi) | 500 | if (!lun->lun_rtpi) |
498 | goto again; | 501 | goto again; |
499 | 502 | ||
500 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { | 503 | list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) { |
501 | /* | 504 | /* |
502 | * Make sure RELATIVE TARGET PORT IDENTIFIER is unique | 505 | * Make sure RELATIVE TARGET PORT IDENTIFIER is unique |
503 | * for 16-bit wrap.. | 506 | * for 16-bit wrap.. |
504 | */ | 507 | */ |
505 | if (port->sep_rtpi == port_tmp->sep_rtpi) | 508 | if (lun->lun_rtpi == tmp->lun_rtpi) |
506 | goto again; | 509 | goto again; |
507 | } | 510 | } |
508 | spin_unlock(&dev->se_port_lock); | 511 | spin_unlock(&dev->se_port_lock); |
509 | 512 | ||
510 | return port; | ||
511 | } | ||
512 | |||
513 | static void core_export_port( | ||
514 | struct se_device *dev, | ||
515 | struct se_portal_group *tpg, | ||
516 | struct se_port *port, | ||
517 | struct se_lun *lun) | ||
518 | { | ||
519 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | ||
520 | |||
521 | spin_lock(&dev->se_port_lock); | ||
522 | spin_lock(&lun->lun_sep_lock); | ||
523 | port->sep_tpg = tpg; | ||
524 | port->sep_lun = lun; | ||
525 | lun->lun_sep = port; | ||
526 | spin_unlock(&lun->lun_sep_lock); | ||
527 | |||
528 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | ||
529 | spin_unlock(&dev->se_port_lock); | ||
530 | |||
531 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && | ||
532 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { | ||
533 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | ||
534 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | ||
535 | pr_err("Unable to allocate t10_alua_tg_pt" | ||
536 | "_gp_member_t\n"); | ||
537 | return; | ||
538 | } | ||
539 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
540 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | ||
541 | dev->t10_alua.default_tg_pt_gp); | ||
542 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
543 | pr_debug("%s/%s: Adding to default ALUA Target Port" | ||
544 | " Group: alua/default_tg_pt_gp\n", | ||
545 | dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); | ||
546 | } | ||
547 | |||
548 | dev->dev_port_count++; | ||
549 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */ | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * Called with struct se_device->se_port_lock spinlock held. | ||
554 | */ | ||
555 | static void core_release_port(struct se_device *dev, struct se_port *port) | ||
556 | __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) | ||
557 | { | ||
558 | /* | ||
559 | * Wait for any port reference for PR ALL_TG_PT=1 operation | ||
560 | * to complete in __core_scsi3_alloc_registration() | ||
561 | */ | ||
562 | spin_unlock(&dev->se_port_lock); | ||
563 | if (atomic_read(&port->sep_tg_pt_ref_cnt)) | ||
564 | cpu_relax(); | ||
565 | spin_lock(&dev->se_port_lock); | ||
566 | |||
567 | core_alua_free_tg_pt_gp_mem(port); | ||
568 | |||
569 | list_del(&port->sep_list); | ||
570 | dev->dev_port_count--; | ||
571 | kfree(port); | ||
572 | } | ||
573 | |||
574 | int core_dev_export( | ||
575 | struct se_device *dev, | ||
576 | struct se_portal_group *tpg, | ||
577 | struct se_lun *lun) | ||
578 | { | ||
579 | struct se_hba *hba = dev->se_hba; | ||
580 | struct se_port *port; | ||
581 | |||
582 | port = core_alloc_port(dev); | ||
583 | if (IS_ERR(port)) | ||
584 | return PTR_ERR(port); | ||
585 | |||
586 | lun->lun_se_dev = dev; | ||
587 | |||
588 | spin_lock(&hba->device_lock); | ||
589 | dev->export_count++; | ||
590 | spin_unlock(&hba->device_lock); | ||
591 | |||
592 | core_export_port(dev, tpg, port, lun); | ||
593 | return 0; | 513 | return 0; |
594 | } | 514 | } |
595 | 515 | ||
596 | void core_dev_unexport( | ||
597 | struct se_device *dev, | ||
598 | struct se_portal_group *tpg, | ||
599 | struct se_lun *lun) | ||
600 | { | ||
601 | struct se_hba *hba = dev->se_hba; | ||
602 | struct se_port *port = lun->lun_sep; | ||
603 | |||
604 | spin_lock(&lun->lun_sep_lock); | ||
605 | if (lun->lun_se_dev == NULL) { | ||
606 | spin_unlock(&lun->lun_sep_lock); | ||
607 | return; | ||
608 | } | ||
609 | spin_unlock(&lun->lun_sep_lock); | ||
610 | |||
611 | spin_lock(&dev->se_port_lock); | ||
612 | core_release_port(dev, port); | ||
613 | spin_unlock(&dev->se_port_lock); | ||
614 | |||
615 | spin_lock(&hba->device_lock); | ||
616 | dev->export_count--; | ||
617 | spin_unlock(&hba->device_lock); | ||
618 | |||
619 | lun->lun_sep = NULL; | ||
620 | lun->lun_se_dev = NULL; | ||
621 | } | ||
622 | |||
623 | static void se_release_vpd_for_dev(struct se_device *dev) | 516 | static void se_release_vpd_for_dev(struct se_device *dev) |
624 | { | 517 | { |
625 | struct t10_vpd *vpd, *vpd_tmp; | 518 | struct t10_vpd *vpd, *vpd_tmp; |
@@ -651,556 +544,19 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | |||
651 | return aligned_max_sectors; | 544 | return aligned_max_sectors; |
652 | } | 545 | } |
653 | 546 | ||
654 | bool se_dev_check_wce(struct se_device *dev) | 547 | int core_dev_add_lun( |
655 | { | ||
656 | bool wce = false; | ||
657 | |||
658 | if (dev->transport->get_write_cache) | ||
659 | wce = dev->transport->get_write_cache(dev); | ||
660 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
661 | wce = true; | ||
662 | |||
663 | return wce; | ||
664 | } | ||
665 | |||
666 | int se_dev_set_max_unmap_lba_count( | ||
667 | struct se_device *dev, | ||
668 | u32 max_unmap_lba_count) | ||
669 | { | ||
670 | dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count; | ||
671 | pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", | ||
672 | dev, dev->dev_attrib.max_unmap_lba_count); | ||
673 | return 0; | ||
674 | } | ||
675 | EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count); | ||
676 | |||
677 | int se_dev_set_max_unmap_block_desc_count( | ||
678 | struct se_device *dev, | ||
679 | u32 max_unmap_block_desc_count) | ||
680 | { | ||
681 | dev->dev_attrib.max_unmap_block_desc_count = | ||
682 | max_unmap_block_desc_count; | ||
683 | pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", | ||
684 | dev, dev->dev_attrib.max_unmap_block_desc_count); | ||
685 | return 0; | ||
686 | } | ||
687 | EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count); | ||
688 | |||
689 | int se_dev_set_unmap_granularity( | ||
690 | struct se_device *dev, | ||
691 | u32 unmap_granularity) | ||
692 | { | ||
693 | dev->dev_attrib.unmap_granularity = unmap_granularity; | ||
694 | pr_debug("dev[%p]: Set unmap_granularity: %u\n", | ||
695 | dev, dev->dev_attrib.unmap_granularity); | ||
696 | return 0; | ||
697 | } | ||
698 | EXPORT_SYMBOL(se_dev_set_unmap_granularity); | ||
699 | |||
700 | int se_dev_set_unmap_granularity_alignment( | ||
701 | struct se_device *dev, | ||
702 | u32 unmap_granularity_alignment) | ||
703 | { | ||
704 | dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; | ||
705 | pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", | ||
706 | dev, dev->dev_attrib.unmap_granularity_alignment); | ||
707 | return 0; | ||
708 | } | ||
709 | EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment); | ||
710 | |||
711 | int se_dev_set_max_write_same_len( | ||
712 | struct se_device *dev, | ||
713 | u32 max_write_same_len) | ||
714 | { | ||
715 | dev->dev_attrib.max_write_same_len = max_write_same_len; | ||
716 | pr_debug("dev[%p]: Set max_write_same_len: %u\n", | ||
717 | dev, dev->dev_attrib.max_write_same_len); | ||
718 | return 0; | ||
719 | } | ||
720 | EXPORT_SYMBOL(se_dev_set_max_write_same_len); | ||
721 | |||
722 | static void dev_set_t10_wwn_model_alias(struct se_device *dev) | ||
723 | { | ||
724 | const char *configname; | ||
725 | |||
726 | configname = config_item_name(&dev->dev_group.cg_item); | ||
727 | if (strlen(configname) >= 16) { | ||
728 | pr_warn("dev[%p]: Backstore name '%s' is too long for " | ||
729 | "INQUIRY_MODEL, truncating to 16 bytes\n", dev, | ||
730 | configname); | ||
731 | } | ||
732 | snprintf(&dev->t10_wwn.model[0], 16, "%s", configname); | ||
733 | } | ||
734 | |||
735 | int se_dev_set_emulate_model_alias(struct se_device *dev, int flag) | ||
736 | { | ||
737 | if (dev->export_count) { | ||
738 | pr_err("dev[%p]: Unable to change model alias" | ||
739 | " while export_count is %d\n", | ||
740 | dev, dev->export_count); | ||
741 | return -EINVAL; | ||
742 | } | ||
743 | |||
744 | if (flag != 0 && flag != 1) { | ||
745 | pr_err("Illegal value %d\n", flag); | ||
746 | return -EINVAL; | ||
747 | } | ||
748 | |||
749 | if (flag) { | ||
750 | dev_set_t10_wwn_model_alias(dev); | ||
751 | } else { | ||
752 | strncpy(&dev->t10_wwn.model[0], | ||
753 | dev->transport->inquiry_prod, 16); | ||
754 | } | ||
755 | dev->dev_attrib.emulate_model_alias = flag; | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | EXPORT_SYMBOL(se_dev_set_emulate_model_alias); | ||
760 | |||
761 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | ||
762 | { | ||
763 | if (flag != 0 && flag != 1) { | ||
764 | pr_err("Illegal value %d\n", flag); | ||
765 | return -EINVAL; | ||
766 | } | ||
767 | |||
768 | if (flag) { | ||
769 | pr_err("dpo_emulated not supported\n"); | ||
770 | return -EINVAL; | ||
771 | } | ||
772 | |||
773 | return 0; | ||
774 | } | ||
775 | EXPORT_SYMBOL(se_dev_set_emulate_dpo); | ||
776 | |||
777 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | ||
778 | { | ||
779 | if (flag != 0 && flag != 1) { | ||
780 | pr_err("Illegal value %d\n", flag); | ||
781 | return -EINVAL; | ||
782 | } | ||
783 | if (flag && | ||
784 | dev->transport->get_write_cache) { | ||
785 | pr_warn("emulate_fua_write not supported for this device, ignoring\n"); | ||
786 | return 0; | ||
787 | } | ||
788 | if (dev->export_count) { | ||
789 | pr_err("emulate_fua_write cannot be changed with active" | ||
790 | " exports: %d\n", dev->export_count); | ||
791 | return -EINVAL; | ||
792 | } | ||
793 | dev->dev_attrib.emulate_fua_write = flag; | ||
794 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | ||
795 | dev, dev->dev_attrib.emulate_fua_write); | ||
796 | return 0; | ||
797 | } | ||
798 | EXPORT_SYMBOL(se_dev_set_emulate_fua_write); | ||
799 | |||
800 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | ||
801 | { | ||
802 | if (flag != 0 && flag != 1) { | ||
803 | pr_err("Illegal value %d\n", flag); | ||
804 | return -EINVAL; | ||
805 | } | ||
806 | |||
807 | if (flag) { | ||
808 | pr_err("ua read emulated not supported\n"); | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | |||
812 | return 0; | ||
813 | } | ||
814 | EXPORT_SYMBOL(se_dev_set_emulate_fua_read); | ||
815 | |||
816 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | ||
817 | { | ||
818 | if (flag != 0 && flag != 1) { | ||
819 | pr_err("Illegal value %d\n", flag); | ||
820 | return -EINVAL; | ||
821 | } | ||
822 | if (flag && | ||
823 | dev->transport->get_write_cache) { | ||
824 | pr_err("emulate_write_cache not supported for this device\n"); | ||
825 | return -EINVAL; | ||
826 | } | ||
827 | if (dev->export_count) { | ||
828 | pr_err("emulate_write_cache cannot be changed with active" | ||
829 | " exports: %d\n", dev->export_count); | ||
830 | return -EINVAL; | ||
831 | } | ||
832 | dev->dev_attrib.emulate_write_cache = flag; | ||
833 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | ||
834 | dev, dev->dev_attrib.emulate_write_cache); | ||
835 | return 0; | ||
836 | } | ||
837 | EXPORT_SYMBOL(se_dev_set_emulate_write_cache); | ||
838 | |||
839 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | ||
840 | { | ||
841 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | ||
842 | pr_err("Illegal value %d\n", flag); | ||
843 | return -EINVAL; | ||
844 | } | ||
845 | |||
846 | if (dev->export_count) { | ||
847 | pr_err("dev[%p]: Unable to change SE Device" | ||
848 | " UA_INTRLCK_CTRL while export_count is %d\n", | ||
849 | dev, dev->export_count); | ||
850 | return -EINVAL; | ||
851 | } | ||
852 | dev->dev_attrib.emulate_ua_intlck_ctrl = flag; | ||
853 | pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | ||
854 | dev, dev->dev_attrib.emulate_ua_intlck_ctrl); | ||
855 | |||
856 | return 0; | ||
857 | } | ||
858 | EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl); | ||
859 | |||
860 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | ||
861 | { | ||
862 | if ((flag != 0) && (flag != 1)) { | ||
863 | pr_err("Illegal value %d\n", flag); | ||
864 | return -EINVAL; | ||
865 | } | ||
866 | |||
867 | if (dev->export_count) { | ||
868 | pr_err("dev[%p]: Unable to change SE Device TAS while" | ||
869 | " export_count is %d\n", | ||
870 | dev, dev->export_count); | ||
871 | return -EINVAL; | ||
872 | } | ||
873 | dev->dev_attrib.emulate_tas = flag; | ||
874 | pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | ||
875 | dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); | ||
876 | |||
877 | return 0; | ||
878 | } | ||
879 | EXPORT_SYMBOL(se_dev_set_emulate_tas); | ||
880 | |||
881 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | ||
882 | { | ||
883 | if ((flag != 0) && (flag != 1)) { | ||
884 | pr_err("Illegal value %d\n", flag); | ||
885 | return -EINVAL; | ||
886 | } | ||
887 | /* | ||
888 | * We expect this value to be non-zero when generic Block Layer | ||
889 | * Discard supported is detected iblock_create_virtdevice(). | ||
890 | */ | ||
891 | if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { | ||
892 | pr_err("Generic Block Discard not supported\n"); | ||
893 | return -ENOSYS; | ||
894 | } | ||
895 | |||
896 | dev->dev_attrib.emulate_tpu = flag; | ||
897 | pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | ||
898 | dev, flag); | ||
899 | return 0; | ||
900 | } | ||
901 | EXPORT_SYMBOL(se_dev_set_emulate_tpu); | ||
902 | |||
903 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | ||
904 | { | ||
905 | if ((flag != 0) && (flag != 1)) { | ||
906 | pr_err("Illegal value %d\n", flag); | ||
907 | return -EINVAL; | ||
908 | } | ||
909 | /* | ||
910 | * We expect this value to be non-zero when generic Block Layer | ||
911 | * Discard supported is detected iblock_create_virtdevice(). | ||
912 | */ | ||
913 | if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { | ||
914 | pr_err("Generic Block Discard not supported\n"); | ||
915 | return -ENOSYS; | ||
916 | } | ||
917 | |||
918 | dev->dev_attrib.emulate_tpws = flag; | ||
919 | pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | ||
920 | dev, flag); | ||
921 | return 0; | ||
922 | } | ||
923 | EXPORT_SYMBOL(se_dev_set_emulate_tpws); | ||
924 | |||
925 | int se_dev_set_emulate_caw(struct se_device *dev, int flag) | ||
926 | { | ||
927 | if (flag != 0 && flag != 1) { | ||
928 | pr_err("Illegal value %d\n", flag); | ||
929 | return -EINVAL; | ||
930 | } | ||
931 | dev->dev_attrib.emulate_caw = flag; | ||
932 | pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n", | ||
933 | dev, flag); | ||
934 | |||
935 | return 0; | ||
936 | } | ||
937 | EXPORT_SYMBOL(se_dev_set_emulate_caw); | ||
938 | |||
939 | int se_dev_set_emulate_3pc(struct se_device *dev, int flag) | ||
940 | { | ||
941 | if (flag != 0 && flag != 1) { | ||
942 | pr_err("Illegal value %d\n", flag); | ||
943 | return -EINVAL; | ||
944 | } | ||
945 | dev->dev_attrib.emulate_3pc = flag; | ||
946 | pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n", | ||
947 | dev, flag); | ||
948 | |||
949 | return 0; | ||
950 | } | ||
951 | EXPORT_SYMBOL(se_dev_set_emulate_3pc); | ||
952 | |||
953 | int se_dev_set_pi_prot_type(struct se_device *dev, int flag) | ||
954 | { | ||
955 | int rc, old_prot = dev->dev_attrib.pi_prot_type; | ||
956 | |||
957 | if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { | ||
958 | pr_err("Illegal value %d for pi_prot_type\n", flag); | ||
959 | return -EINVAL; | ||
960 | } | ||
961 | if (flag == 2) { | ||
962 | pr_err("DIF TYPE2 protection currently not supported\n"); | ||
963 | return -ENOSYS; | ||
964 | } | ||
965 | if (dev->dev_attrib.hw_pi_prot_type) { | ||
966 | pr_warn("DIF protection enabled on underlying hardware," | ||
967 | " ignoring\n"); | ||
968 | return 0; | ||
969 | } | ||
970 | if (!dev->transport->init_prot || !dev->transport->free_prot) { | ||
971 | /* 0 is only allowed value for non-supporting backends */ | ||
972 | if (flag == 0) | ||
973 | return 0; | ||
974 | |||
975 | pr_err("DIF protection not supported by backend: %s\n", | ||
976 | dev->transport->name); | ||
977 | return -ENOSYS; | ||
978 | } | ||
979 | if (!(dev->dev_flags & DF_CONFIGURED)) { | ||
980 | pr_err("DIF protection requires device to be configured\n"); | ||
981 | return -ENODEV; | ||
982 | } | ||
983 | if (dev->export_count) { | ||
984 | pr_err("dev[%p]: Unable to change SE Device PROT type while" | ||
985 | " export_count is %d\n", dev, dev->export_count); | ||
986 | return -EINVAL; | ||
987 | } | ||
988 | |||
989 | dev->dev_attrib.pi_prot_type = flag; | ||
990 | |||
991 | if (flag && !old_prot) { | ||
992 | rc = dev->transport->init_prot(dev); | ||
993 | if (rc) { | ||
994 | dev->dev_attrib.pi_prot_type = old_prot; | ||
995 | return rc; | ||
996 | } | ||
997 | |||
998 | } else if (!flag && old_prot) { | ||
999 | dev->transport->free_prot(dev); | ||
1000 | } | ||
1001 | pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); | ||
1002 | |||
1003 | return 0; | ||
1004 | } | ||
1005 | EXPORT_SYMBOL(se_dev_set_pi_prot_type); | ||
1006 | |||
1007 | int se_dev_set_pi_prot_format(struct se_device *dev, int flag) | ||
1008 | { | ||
1009 | int rc; | ||
1010 | |||
1011 | if (!flag) | ||
1012 | return 0; | ||
1013 | |||
1014 | if (flag != 1) { | ||
1015 | pr_err("Illegal value %d for pi_prot_format\n", flag); | ||
1016 | return -EINVAL; | ||
1017 | } | ||
1018 | if (!dev->transport->format_prot) { | ||
1019 | pr_err("DIF protection format not supported by backend %s\n", | ||
1020 | dev->transport->name); | ||
1021 | return -ENOSYS; | ||
1022 | } | ||
1023 | if (!(dev->dev_flags & DF_CONFIGURED)) { | ||
1024 | pr_err("DIF protection format requires device to be configured\n"); | ||
1025 | return -ENODEV; | ||
1026 | } | ||
1027 | if (dev->export_count) { | ||
1028 | pr_err("dev[%p]: Unable to format SE Device PROT type while" | ||
1029 | " export_count is %d\n", dev, dev->export_count); | ||
1030 | return -EINVAL; | ||
1031 | } | ||
1032 | |||
1033 | rc = dev->transport->format_prot(dev); | ||
1034 | if (rc) | ||
1035 | return rc; | ||
1036 | |||
1037 | pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | EXPORT_SYMBOL(se_dev_set_pi_prot_format); | ||
1042 | |||
1043 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | ||
1044 | { | ||
1045 | if ((flag != 0) && (flag != 1)) { | ||
1046 | pr_err("Illegal value %d\n", flag); | ||
1047 | return -EINVAL; | ||
1048 | } | ||
1049 | dev->dev_attrib.enforce_pr_isids = flag; | ||
1050 | pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | ||
1051 | (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); | ||
1052 | return 0; | ||
1053 | } | ||
1054 | EXPORT_SYMBOL(se_dev_set_enforce_pr_isids); | ||
1055 | |||
1056 | int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) | ||
1057 | { | ||
1058 | if ((flag != 0) && (flag != 1)) { | ||
1059 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1060 | return -EINVAL; | ||
1061 | } | ||
1062 | if (dev->export_count) { | ||
1063 | pr_err("dev[%p]: Unable to set force_pr_aptpl while" | ||
1064 | " export_count is %d\n", dev, dev->export_count); | ||
1065 | return -EINVAL; | ||
1066 | } | ||
1067 | |||
1068 | dev->dev_attrib.force_pr_aptpl = flag; | ||
1069 | pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); | ||
1070 | return 0; | ||
1071 | } | ||
1072 | EXPORT_SYMBOL(se_dev_set_force_pr_aptpl); | ||
1073 | |||
1074 | int se_dev_set_is_nonrot(struct se_device *dev, int flag) | ||
1075 | { | ||
1076 | if ((flag != 0) && (flag != 1)) { | ||
1077 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1078 | return -EINVAL; | ||
1079 | } | ||
1080 | dev->dev_attrib.is_nonrot = flag; | ||
1081 | pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", | ||
1082 | dev, flag); | ||
1083 | return 0; | ||
1084 | } | ||
1085 | EXPORT_SYMBOL(se_dev_set_is_nonrot); | ||
1086 | |||
1087 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) | ||
1088 | { | ||
1089 | if (flag != 0) { | ||
1090 | printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted" | ||
1091 | " reordering not implemented\n", dev); | ||
1092 | return -ENOSYS; | ||
1093 | } | ||
1094 | dev->dev_attrib.emulate_rest_reord = flag; | ||
1095 | pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); | ||
1096 | return 0; | ||
1097 | } | ||
1098 | EXPORT_SYMBOL(se_dev_set_emulate_rest_reord); | ||
1099 | |||
1100 | /* | ||
1101 | * Note, this can only be called on unexported SE Device Object. | ||
1102 | */ | ||
1103 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | ||
1104 | { | ||
1105 | if (dev->export_count) { | ||
1106 | pr_err("dev[%p]: Unable to change SE Device TCQ while" | ||
1107 | " export_count is %d\n", | ||
1108 | dev, dev->export_count); | ||
1109 | return -EINVAL; | ||
1110 | } | ||
1111 | if (!queue_depth) { | ||
1112 | pr_err("dev[%p]: Illegal ZERO value for queue" | ||
1113 | "_depth\n", dev); | ||
1114 | return -EINVAL; | ||
1115 | } | ||
1116 | |||
1117 | if (queue_depth > dev->dev_attrib.queue_depth) { | ||
1118 | if (queue_depth > dev->dev_attrib.hw_queue_depth) { | ||
1119 | pr_err("dev[%p]: Passed queue_depth:" | ||
1120 | " %u exceeds TCM/SE_Device MAX" | ||
1121 | " TCQ: %u\n", dev, queue_depth, | ||
1122 | dev->dev_attrib.hw_queue_depth); | ||
1123 | return -EINVAL; | ||
1124 | } | ||
1125 | } | ||
1126 | dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; | ||
1127 | pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", | ||
1128 | dev, queue_depth); | ||
1129 | return 0; | ||
1130 | } | ||
1131 | EXPORT_SYMBOL(se_dev_set_queue_depth); | ||
1132 | |||
1133 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | ||
1134 | { | ||
1135 | if (dev->export_count) { | ||
1136 | pr_err("dev[%p]: Unable to change SE Device" | ||
1137 | " optimal_sectors while export_count is %d\n", | ||
1138 | dev, dev->export_count); | ||
1139 | return -EINVAL; | ||
1140 | } | ||
1141 | if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { | ||
1142 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" | ||
1143 | " greater than hw_max_sectors: %u\n", dev, | ||
1144 | optimal_sectors, dev->dev_attrib.hw_max_sectors); | ||
1145 | return -EINVAL; | ||
1146 | } | ||
1147 | |||
1148 | dev->dev_attrib.optimal_sectors = optimal_sectors; | ||
1149 | pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", | ||
1150 | dev, optimal_sectors); | ||
1151 | return 0; | ||
1152 | } | ||
1153 | EXPORT_SYMBOL(se_dev_set_optimal_sectors); | ||
1154 | |||
1155 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | ||
1156 | { | ||
1157 | if (dev->export_count) { | ||
1158 | pr_err("dev[%p]: Unable to change SE Device block_size" | ||
1159 | " while export_count is %d\n", | ||
1160 | dev, dev->export_count); | ||
1161 | return -EINVAL; | ||
1162 | } | ||
1163 | |||
1164 | if ((block_size != 512) && | ||
1165 | (block_size != 1024) && | ||
1166 | (block_size != 2048) && | ||
1167 | (block_size != 4096)) { | ||
1168 | pr_err("dev[%p]: Illegal value for block_device: %u" | ||
1169 | " for SE device, must be 512, 1024, 2048 or 4096\n", | ||
1170 | dev, block_size); | ||
1171 | return -EINVAL; | ||
1172 | } | ||
1173 | |||
1174 | dev->dev_attrib.block_size = block_size; | ||
1175 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", | ||
1176 | dev, block_size); | ||
1177 | |||
1178 | if (dev->dev_attrib.max_bytes_per_io) | ||
1179 | dev->dev_attrib.hw_max_sectors = | ||
1180 | dev->dev_attrib.max_bytes_per_io / block_size; | ||
1181 | |||
1182 | return 0; | ||
1183 | } | ||
1184 | EXPORT_SYMBOL(se_dev_set_block_size); | ||
1185 | |||
1186 | struct se_lun *core_dev_add_lun( | ||
1187 | struct se_portal_group *tpg, | 548 | struct se_portal_group *tpg, |
1188 | struct se_device *dev, | 549 | struct se_device *dev, |
1189 | u32 unpacked_lun) | 550 | struct se_lun *lun) |
1190 | { | 551 | { |
1191 | struct se_lun *lun; | ||
1192 | int rc; | 552 | int rc; |
1193 | 553 | ||
1194 | lun = core_tpg_alloc_lun(tpg, unpacked_lun); | ||
1195 | if (IS_ERR(lun)) | ||
1196 | return lun; | ||
1197 | |||
1198 | rc = core_tpg_add_lun(tpg, lun, | 554 | rc = core_tpg_add_lun(tpg, lun, |
1199 | TRANSPORT_LUNFLAGS_READ_WRITE, dev); | 555 | TRANSPORT_LUNFLAGS_READ_WRITE, dev); |
1200 | if (rc < 0) | 556 | if (rc < 0) |
1201 | return ERR_PTR(rc); | 557 | return rc; |
1202 | 558 | ||
1203 | pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | 559 | pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from" |
1204 | " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), | 560 | " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1205 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, | 561 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
1206 | tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); | 562 | tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); |
@@ -1210,20 +566,19 @@ struct se_lun *core_dev_add_lun( | |||
1210 | */ | 566 | */ |
1211 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { | 567 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { |
1212 | struct se_node_acl *acl; | 568 | struct se_node_acl *acl; |
1213 | spin_lock_irq(&tpg->acl_node_lock); | 569 | |
570 | mutex_lock(&tpg->acl_node_mutex); | ||
1214 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 571 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
1215 | if (acl->dynamic_node_acl && | 572 | if (acl->dynamic_node_acl && |
1216 | (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || | 573 | (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || |
1217 | !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { | 574 | !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { |
1218 | spin_unlock_irq(&tpg->acl_node_lock); | 575 | core_tpg_add_node_to_devs(acl, tpg, lun); |
1219 | core_tpg_add_node_to_devs(acl, tpg); | ||
1220 | spin_lock_irq(&tpg->acl_node_lock); | ||
1221 | } | 576 | } |
1222 | } | 577 | } |
1223 | spin_unlock_irq(&tpg->acl_node_lock); | 578 | mutex_unlock(&tpg->acl_node_mutex); |
1224 | } | 579 | } |
1225 | 580 | ||
1226 | return lun; | 581 | return 0; |
1227 | } | 582 | } |
1228 | 583 | ||
1229 | /* core_dev_del_lun(): | 584 | /* core_dev_del_lun(): |
@@ -1234,7 +589,7 @@ void core_dev_del_lun( | |||
1234 | struct se_portal_group *tpg, | 589 | struct se_portal_group *tpg, |
1235 | struct se_lun *lun) | 590 | struct se_lun *lun) |
1236 | { | 591 | { |
1237 | pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from" | 592 | pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from" |
1238 | " device object\n", tpg->se_tpg_tfo->get_fabric_name(), | 593 | " device object\n", tpg->se_tpg_tfo->get_fabric_name(), |
1239 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, | 594 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
1240 | tpg->se_tpg_tfo->get_fabric_name()); | 595 | tpg->se_tpg_tfo->get_fabric_name()); |
@@ -1242,72 +597,10 @@ void core_dev_del_lun( | |||
1242 | core_tpg_remove_lun(tpg, lun); | 597 | core_tpg_remove_lun(tpg, lun); |
1243 | } | 598 | } |
1244 | 599 | ||
1245 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) | ||
1246 | { | ||
1247 | struct se_lun *lun; | ||
1248 | |||
1249 | spin_lock(&tpg->tpg_lun_lock); | ||
1250 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
1251 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | ||
1252 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | ||
1253 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, | ||
1254 | TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
1255 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
1256 | spin_unlock(&tpg->tpg_lun_lock); | ||
1257 | return NULL; | ||
1258 | } | ||
1259 | lun = tpg->tpg_lun_list[unpacked_lun]; | ||
1260 | |||
1261 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | ||
1262 | pr_err("%s Logical Unit Number: %u is not free on" | ||
1263 | " Target Portal Group: %hu, ignoring request.\n", | ||
1264 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, | ||
1265 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
1266 | spin_unlock(&tpg->tpg_lun_lock); | ||
1267 | return NULL; | ||
1268 | } | ||
1269 | spin_unlock(&tpg->tpg_lun_lock); | ||
1270 | |||
1271 | return lun; | ||
1272 | } | ||
1273 | |||
1274 | /* core_dev_get_lun(): | ||
1275 | * | ||
1276 | * | ||
1277 | */ | ||
1278 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) | ||
1279 | { | ||
1280 | struct se_lun *lun; | ||
1281 | |||
1282 | spin_lock(&tpg->tpg_lun_lock); | ||
1283 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
1284 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | ||
1285 | "_TPG-1: %u for Target Portal Group: %hu\n", | ||
1286 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, | ||
1287 | TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
1288 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
1289 | spin_unlock(&tpg->tpg_lun_lock); | ||
1290 | return NULL; | ||
1291 | } | ||
1292 | lun = tpg->tpg_lun_list[unpacked_lun]; | ||
1293 | |||
1294 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | ||
1295 | pr_err("%s Logical Unit Number: %u is not active on" | ||
1296 | " Target Portal Group: %hu, ignoring request.\n", | ||
1297 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, | ||
1298 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
1299 | spin_unlock(&tpg->tpg_lun_lock); | ||
1300 | return NULL; | ||
1301 | } | ||
1302 | spin_unlock(&tpg->tpg_lun_lock); | ||
1303 | |||
1304 | return lun; | ||
1305 | } | ||
1306 | |||
1307 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | 600 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( |
1308 | struct se_portal_group *tpg, | 601 | struct se_portal_group *tpg, |
1309 | struct se_node_acl *nacl, | 602 | struct se_node_acl *nacl, |
1310 | u32 mapped_lun, | 603 | u64 mapped_lun, |
1311 | int *ret) | 604 | int *ret) |
1312 | { | 605 | { |
1313 | struct se_lun_acl *lacl; | 606 | struct se_lun_acl *lacl; |
@@ -1325,7 +618,6 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |||
1325 | return NULL; | 618 | return NULL; |
1326 | } | 619 | } |
1327 | 620 | ||
1328 | INIT_LIST_HEAD(&lacl->lacl_list); | ||
1329 | lacl->mapped_lun = mapped_lun; | 621 | lacl->mapped_lun = mapped_lun; |
1330 | lacl->se_lun_nacl = nacl; | 622 | lacl->se_lun_nacl = nacl; |
1331 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", | 623 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", |
@@ -1337,22 +629,16 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |||
1337 | int core_dev_add_initiator_node_lun_acl( | 629 | int core_dev_add_initiator_node_lun_acl( |
1338 | struct se_portal_group *tpg, | 630 | struct se_portal_group *tpg, |
1339 | struct se_lun_acl *lacl, | 631 | struct se_lun_acl *lacl, |
1340 | u32 unpacked_lun, | 632 | struct se_lun *lun, |
1341 | u32 lun_access) | 633 | u32 lun_access) |
1342 | { | 634 | { |
1343 | struct se_lun *lun; | 635 | struct se_node_acl *nacl = lacl->se_lun_nacl; |
1344 | struct se_node_acl *nacl; | 636 | /* |
1345 | 637 | * rcu_dereference_raw protected by se_lun->lun_group symlink | |
1346 | lun = core_dev_get_lun(tpg, unpacked_lun); | 638 | * reference to se_device->dev_group. |
1347 | if (!lun) { | 639 | */ |
1348 | pr_err("%s Logical Unit Number: %u is not active on" | 640 | struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); |
1349 | " Target Portal Group: %hu, ignoring request.\n", | ||
1350 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, | ||
1351 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
1352 | return -EINVAL; | ||
1353 | } | ||
1354 | 641 | ||
1355 | nacl = lacl->se_lun_nacl; | ||
1356 | if (!nacl) | 642 | if (!nacl) |
1357 | return -EINVAL; | 643 | return -EINVAL; |
1358 | 644 | ||
@@ -1366,52 +652,40 @@ int core_dev_add_initiator_node_lun_acl( | |||
1366 | lun_access, nacl, tpg) < 0) | 652 | lun_access, nacl, tpg) < 0) |
1367 | return -EINVAL; | 653 | return -EINVAL; |
1368 | 654 | ||
1369 | spin_lock(&lun->lun_acl_lock); | 655 | pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for " |
1370 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); | ||
1371 | atomic_inc_mb(&lun->lun_acl_count); | ||
1372 | spin_unlock(&lun->lun_acl_lock); | ||
1373 | |||
1374 | pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " | ||
1375 | " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 656 | " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
1376 | tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | 657 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun, |
1377 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | 658 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", |
1378 | lacl->initiatorname); | 659 | lacl->initiatorname); |
1379 | /* | 660 | /* |
1380 | * Check to see if there are any existing persistent reservation APTPL | 661 | * Check to see if there are any existing persistent reservation APTPL |
1381 | * pre-registrations that need to be enabled for this LUN ACL.. | 662 | * pre-registrations that need to be enabled for this LUN ACL.. |
1382 | */ | 663 | */ |
1383 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl, | 664 | core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl, |
1384 | lacl->mapped_lun); | 665 | lacl->mapped_lun); |
1385 | return 0; | 666 | return 0; |
1386 | } | 667 | } |
1387 | 668 | ||
1388 | /* core_dev_del_initiator_node_lun_acl(): | ||
1389 | * | ||
1390 | * | ||
1391 | */ | ||
1392 | int core_dev_del_initiator_node_lun_acl( | 669 | int core_dev_del_initiator_node_lun_acl( |
1393 | struct se_portal_group *tpg, | ||
1394 | struct se_lun *lun, | 670 | struct se_lun *lun, |
1395 | struct se_lun_acl *lacl) | 671 | struct se_lun_acl *lacl) |
1396 | { | 672 | { |
673 | struct se_portal_group *tpg = lun->lun_tpg; | ||
1397 | struct se_node_acl *nacl; | 674 | struct se_node_acl *nacl; |
675 | struct se_dev_entry *deve; | ||
1398 | 676 | ||
1399 | nacl = lacl->se_lun_nacl; | 677 | nacl = lacl->se_lun_nacl; |
1400 | if (!nacl) | 678 | if (!nacl) |
1401 | return -EINVAL; | 679 | return -EINVAL; |
1402 | 680 | ||
1403 | spin_lock(&lun->lun_acl_lock); | 681 | mutex_lock(&nacl->lun_entry_mutex); |
1404 | list_del(&lacl->lacl_list); | 682 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1405 | atomic_dec_mb(&lun->lun_acl_count); | 683 | if (deve) |
1406 | spin_unlock(&lun->lun_acl_lock); | 684 | core_disable_device_list_for_node(lun, deve, nacl, tpg); |
1407 | 685 | mutex_unlock(&nacl->lun_entry_mutex); | |
1408 | core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, | ||
1409 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); | ||
1410 | |||
1411 | lacl->se_lun = NULL; | ||
1412 | 686 | ||
1413 | pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" | 687 | pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for" |
1414 | " InitiatorNode: %s Mapped LUN: %u\n", | 688 | " InitiatorNode: %s Mapped LUN: %llu\n", |
1415 | tpg->se_tpg_tfo->get_fabric_name(), | 689 | tpg->se_tpg_tfo->get_fabric_name(), |
1416 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, | 690 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
1417 | lacl->initiatorname, lacl->mapped_lun); | 691 | lacl->initiatorname, lacl->mapped_lun); |
@@ -1424,7 +698,7 @@ void core_dev_free_initiator_node_lun_acl( | |||
1424 | struct se_lun_acl *lacl) | 698 | struct se_lun_acl *lacl) |
1425 | { | 699 | { |
1426 | pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | 700 | pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" |
1427 | " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), | 701 | " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(), |
1428 | tpg->se_tpg_tfo->tpg_get_tag(tpg), | 702 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1429 | tpg->se_tpg_tfo->get_fabric_name(), | 703 | tpg->se_tpg_tfo->get_fabric_name(), |
1430 | lacl->initiatorname, lacl->mapped_lun); | 704 | lacl->initiatorname, lacl->mapped_lun); |
@@ -1473,14 +747,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) | |||
1473 | struct se_device *dev; | 747 | struct se_device *dev; |
1474 | struct se_lun *xcopy_lun; | 748 | struct se_lun *xcopy_lun; |
1475 | 749 | ||
1476 | dev = hba->transport->alloc_device(hba, name); | 750 | dev = hba->backend->ops->alloc_device(hba, name); |
1477 | if (!dev) | 751 | if (!dev) |
1478 | return NULL; | 752 | return NULL; |
1479 | 753 | ||
1480 | dev->dev_link_magic = SE_DEV_LINK_MAGIC; | 754 | dev->dev_link_magic = SE_DEV_LINK_MAGIC; |
1481 | dev->se_hba = hba; | 755 | dev->se_hba = hba; |
1482 | dev->transport = hba->transport; | 756 | dev->transport = hba->backend->ops; |
1483 | dev->prot_length = sizeof(struct se_dif_v1_tuple); | 757 | dev->prot_length = sizeof(struct se_dif_v1_tuple); |
758 | dev->hba_index = hba->hba_index; | ||
1484 | 759 | ||
1485 | INIT_LIST_HEAD(&dev->dev_list); | 760 | INIT_LIST_HEAD(&dev->dev_list); |
1486 | INIT_LIST_HEAD(&dev->dev_sep_list); | 761 | INIT_LIST_HEAD(&dev->dev_sep_list); |
@@ -1513,9 +788,9 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) | |||
1513 | 788 | ||
1514 | dev->dev_attrib.da_dev = dev; | 789 | dev->dev_attrib.da_dev = dev; |
1515 | dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; | 790 | dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; |
1516 | dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO; | 791 | dev->dev_attrib.emulate_dpo = 1; |
1517 | dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; | 792 | dev->dev_attrib.emulate_fua_write = 1; |
1518 | dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; | 793 | dev->dev_attrib.emulate_fua_read = 1; |
1519 | dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; | 794 | dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; |
1520 | dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | 795 | dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; |
1521 | dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; | 796 | dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; |
@@ -1537,12 +812,12 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) | |||
1537 | dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; | 812 | dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; |
1538 | 813 | ||
1539 | xcopy_lun = &dev->xcopy_lun; | 814 | xcopy_lun = &dev->xcopy_lun; |
1540 | xcopy_lun->lun_se_dev = dev; | 815 | rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); |
1541 | init_completion(&xcopy_lun->lun_shutdown_comp); | ||
1542 | INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); | ||
1543 | spin_lock_init(&xcopy_lun->lun_acl_lock); | ||
1544 | spin_lock_init(&xcopy_lun->lun_sep_lock); | ||
1545 | init_completion(&xcopy_lun->lun_ref_comp); | 816 | init_completion(&xcopy_lun->lun_ref_comp); |
817 | INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); | ||
818 | INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); | ||
819 | mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); | ||
820 | xcopy_lun->lun_tpg = &xcopy_pt_tpg; | ||
1546 | 821 | ||
1547 | return dev; | 822 | return dev; |
1548 | } | 823 | } |
@@ -1679,7 +954,7 @@ int core_dev_setup_virtual_lun0(void) | |||
1679 | goto out_free_hba; | 954 | goto out_free_hba; |
1680 | } | 955 | } |
1681 | 956 | ||
1682 | hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf)); | 957 | hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf)); |
1683 | 958 | ||
1684 | ret = target_configure_device(dev); | 959 | ret = target_configure_device(dev); |
1685 | if (ret) | 960 | if (ret) |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 1f7886bb16bf..48a36989c1a6 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <target/target_core_base.h> | 36 | #include <target/target_core_base.h> |
37 | #include <target/target_core_fabric.h> | 37 | #include <target/target_core_fabric.h> |
38 | #include <target/target_core_fabric_configfs.h> | 38 | #include <target/target_core_fabric_configfs.h> |
39 | #include <target/target_core_configfs.h> | ||
40 | #include <target/configfs_macros.h> | 39 | #include <target/configfs_macros.h> |
41 | 40 | ||
42 | #include "target_core_internal.h" | 41 | #include "target_core_internal.h" |
@@ -46,27 +45,25 @@ | |||
46 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ | 45 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ |
47 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | 46 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ |
48 | { \ | 47 | { \ |
49 | struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ | 48 | struct config_item_type *cit = &tf->tf_##_name##_cit; \ |
50 | struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ | ||
51 | \ | 49 | \ |
52 | cit->ct_item_ops = _item_ops; \ | 50 | cit->ct_item_ops = _item_ops; \ |
53 | cit->ct_group_ops = _group_ops; \ | 51 | cit->ct_group_ops = _group_ops; \ |
54 | cit->ct_attrs = _attrs; \ | 52 | cit->ct_attrs = _attrs; \ |
55 | cit->ct_owner = tf->tf_module; \ | 53 | cit->ct_owner = tf->tf_ops->module; \ |
56 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | 54 | pr_debug("Setup generic %s\n", __stringify(_name)); \ |
57 | } | 55 | } |
58 | 56 | ||
59 | #define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ | 57 | #define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ |
60 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | 58 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ |
61 | { \ | 59 | { \ |
62 | struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ | 60 | struct config_item_type *cit = &tf->tf_##_name##_cit; \ |
63 | struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ | 61 | struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \ |
64 | struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \ | ||
65 | \ | 62 | \ |
66 | cit->ct_item_ops = _item_ops; \ | 63 | cit->ct_item_ops = _item_ops; \ |
67 | cit->ct_group_ops = _group_ops; \ | 64 | cit->ct_group_ops = _group_ops; \ |
68 | cit->ct_attrs = attrs; \ | 65 | cit->ct_attrs = attrs; \ |
69 | cit->ct_owner = tf->tf_module; \ | 66 | cit->ct_owner = tf->tf_ops->module; \ |
70 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | 67 | pr_debug("Setup generic %s\n", __stringify(_name)); \ |
71 | } | 68 | } |
72 | 69 | ||
@@ -83,7 +80,7 @@ static int target_fabric_mappedlun_link( | |||
83 | struct se_lun_acl, se_lun_group); | 80 | struct se_lun_acl, se_lun_group); |
84 | struct se_portal_group *se_tpg; | 81 | struct se_portal_group *se_tpg; |
85 | struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; | 82 | struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; |
86 | int ret = 0, lun_access; | 83 | int lun_access; |
87 | 84 | ||
88 | if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { | 85 | if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { |
89 | pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" | 86 | pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" |
@@ -93,12 +90,11 @@ static int target_fabric_mappedlun_link( | |||
93 | /* | 90 | /* |
94 | * Ensure that the source port exists | 91 | * Ensure that the source port exists |
95 | */ | 92 | */ |
96 | if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { | 93 | if (!lun->lun_se_dev) { |
97 | pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" | 94 | pr_err("Source se_lun->lun_se_dev does not exist\n"); |
98 | "_tpg does not exist\n"); | ||
99 | return -EINVAL; | 95 | return -EINVAL; |
100 | } | 96 | } |
101 | se_tpg = lun->lun_sep->sep_tpg; | 97 | se_tpg = lun->lun_tpg; |
102 | 98 | ||
103 | nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; | 99 | nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; |
104 | tpg_ci = &nacl_ci->ci_group->cg_item; | 100 | tpg_ci = &nacl_ci->ci_group->cg_item; |
@@ -125,49 +121,35 @@ static int target_fabric_mappedlun_link( | |||
125 | * which be will write protected (READ-ONLY) when | 121 | * which be will write protected (READ-ONLY) when |
126 | * tpg_1/attrib/demo_mode_write_protect=1 | 122 | * tpg_1/attrib/demo_mode_write_protect=1 |
127 | */ | 123 | */ |
128 | spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); | 124 | rcu_read_lock(); |
129 | deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun]; | 125 | deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun); |
130 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) | 126 | if (deve) |
131 | lun_access = deve->lun_flags; | 127 | lun_access = deve->lun_flags; |
132 | else | 128 | else |
133 | lun_access = | 129 | lun_access = |
134 | (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( | 130 | (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( |
135 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : | 131 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : |
136 | TRANSPORT_LUNFLAGS_READ_WRITE; | 132 | TRANSPORT_LUNFLAGS_READ_WRITE; |
137 | spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); | 133 | rcu_read_unlock(); |
138 | /* | 134 | /* |
139 | * Determine the actual mapped LUN value user wants.. | 135 | * Determine the actual mapped LUN value user wants.. |
140 | * | 136 | * |
141 | * This value is what the SCSI Initiator actually sees the | 137 | * This value is what the SCSI Initiator actually sees the |
142 | * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports. | 138 | * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports. |
143 | */ | 139 | */ |
144 | ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, | 140 | return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access); |
145 | lun->unpacked_lun, lun_access); | ||
146 | |||
147 | return (ret < 0) ? -EINVAL : 0; | ||
148 | } | 141 | } |
149 | 142 | ||
150 | static int target_fabric_mappedlun_unlink( | 143 | static int target_fabric_mappedlun_unlink( |
151 | struct config_item *lun_acl_ci, | 144 | struct config_item *lun_acl_ci, |
152 | struct config_item *lun_ci) | 145 | struct config_item *lun_ci) |
153 | { | 146 | { |
154 | struct se_lun *lun; | ||
155 | struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), | 147 | struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), |
156 | struct se_lun_acl, se_lun_group); | 148 | struct se_lun_acl, se_lun_group); |
157 | struct se_node_acl *nacl = lacl->se_lun_nacl; | 149 | struct se_lun *lun = container_of(to_config_group(lun_ci), |
158 | struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun]; | 150 | struct se_lun, lun_group); |
159 | struct se_portal_group *se_tpg; | ||
160 | /* | ||
161 | * Determine if the underlying MappedLUN has already been released.. | ||
162 | */ | ||
163 | if (!deve->se_lun) | ||
164 | return 0; | ||
165 | |||
166 | lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); | ||
167 | se_tpg = lun->lun_sep->sep_tpg; | ||
168 | 151 | ||
169 | core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl); | 152 | return core_dev_del_initiator_node_lun_acl(lun, lacl); |
170 | return 0; | ||
171 | } | 153 | } |
172 | 154 | ||
173 | CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); | 155 | CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); |
@@ -183,14 +165,15 @@ static ssize_t target_fabric_mappedlun_show_write_protect( | |||
183 | { | 165 | { |
184 | struct se_node_acl *se_nacl = lacl->se_lun_nacl; | 166 | struct se_node_acl *se_nacl = lacl->se_lun_nacl; |
185 | struct se_dev_entry *deve; | 167 | struct se_dev_entry *deve; |
186 | ssize_t len; | 168 | ssize_t len = 0; |
187 | 169 | ||
188 | spin_lock_irq(&se_nacl->device_list_lock); | 170 | rcu_read_lock(); |
189 | deve = se_nacl->device_list[lacl->mapped_lun]; | 171 | deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun); |
190 | len = sprintf(page, "%d\n", | 172 | if (deve) { |
191 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? | 173 | len = sprintf(page, "%d\n", |
192 | 1 : 0); | 174 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0); |
193 | spin_unlock_irq(&se_nacl->device_list_lock); | 175 | } |
176 | rcu_read_unlock(); | ||
194 | 177 | ||
195 | return len; | 178 | return len; |
196 | } | 179 | } |
@@ -218,7 +201,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect( | |||
218 | lacl->se_lun_nacl); | 201 | lacl->se_lun_nacl); |
219 | 202 | ||
220 | pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" | 203 | pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" |
221 | " Mapped LUN: %u Write Protect bit to %s\n", | 204 | " Mapped LUN: %llu Write Protect bit to %s\n", |
222 | se_tpg->se_tpg_tfo->get_fabric_name(), | 205 | se_tpg->se_tpg_tfo->get_fabric_name(), |
223 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); | 206 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); |
224 | 207 | ||
@@ -338,7 +321,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
338 | struct config_item *acl_ci; | 321 | struct config_item *acl_ci; |
339 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | 322 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; |
340 | char *buf; | 323 | char *buf; |
341 | unsigned long mapped_lun; | 324 | unsigned long long mapped_lun; |
342 | int ret = 0; | 325 | int ret = 0; |
343 | 326 | ||
344 | acl_ci = &group->cg_item; | 327 | acl_ci = &group->cg_item; |
@@ -366,21 +349,9 @@ static struct config_group *target_fabric_make_mappedlun( | |||
366 | * Determine the Mapped LUN value. This is what the SCSI Initiator | 349 | * Determine the Mapped LUN value. This is what the SCSI Initiator |
367 | * Port will actually see. | 350 | * Port will actually see. |
368 | */ | 351 | */ |
369 | ret = kstrtoul(buf + 4, 0, &mapped_lun); | 352 | ret = kstrtoull(buf + 4, 0, &mapped_lun); |
370 | if (ret) | 353 | if (ret) |
371 | goto out; | 354 | goto out; |
372 | if (mapped_lun > UINT_MAX) { | ||
373 | ret = -EINVAL; | ||
374 | goto out; | ||
375 | } | ||
376 | if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
377 | pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG" | ||
378 | "-1: %u for Target Portal Group: %u\n", mapped_lun, | ||
379 | TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
380 | se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); | ||
381 | ret = -EINVAL; | ||
382 | goto out; | ||
383 | } | ||
384 | 355 | ||
385 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, | 356 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, |
386 | mapped_lun, &ret); | 357 | mapped_lun, &ret); |
@@ -399,9 +370,9 @@ static struct config_group *target_fabric_make_mappedlun( | |||
399 | } | 370 | } |
400 | 371 | ||
401 | config_group_init_type_name(&lacl->se_lun_group, name, | 372 | config_group_init_type_name(&lacl->se_lun_group, name, |
402 | &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit); | 373 | &tf->tf_tpg_mappedlun_cit); |
403 | config_group_init_type_name(&lacl->ml_stat_grps.stat_group, | 374 | config_group_init_type_name(&lacl->ml_stat_grps.stat_group, |
404 | "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit); | 375 | "statistics", &tf->tf_tpg_mappedlun_stat_cit); |
405 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; | 376 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; |
406 | lacl_cg->default_groups[1] = NULL; | 377 | lacl_cg->default_groups[1] = NULL; |
407 | 378 | ||
@@ -458,10 +429,11 @@ static void target_fabric_nacl_base_release(struct config_item *item) | |||
458 | { | 429 | { |
459 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | 430 | struct se_node_acl *se_nacl = container_of(to_config_group(item), |
460 | struct se_node_acl, acl_group); | 431 | struct se_node_acl, acl_group); |
461 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | 432 | struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf; |
462 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
463 | 433 | ||
464 | tf->tf_ops.fabric_drop_nodeacl(se_nacl); | 434 | if (tf->tf_ops->fabric_cleanup_nodeacl) |
435 | tf->tf_ops->fabric_cleanup_nodeacl(se_nacl); | ||
436 | core_tpg_del_initiator_node_acl(se_nacl); | ||
465 | } | 437 | } |
466 | 438 | ||
467 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { | 439 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { |
@@ -501,15 +473,18 @@ static struct config_group *target_fabric_make_nodeacl( | |||
501 | struct se_node_acl *se_nacl; | 473 | struct se_node_acl *se_nacl; |
502 | struct config_group *nacl_cg; | 474 | struct config_group *nacl_cg; |
503 | 475 | ||
504 | if (!tf->tf_ops.fabric_make_nodeacl) { | 476 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name); |
505 | pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); | ||
506 | return ERR_PTR(-ENOSYS); | ||
507 | } | ||
508 | |||
509 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); | ||
510 | if (IS_ERR(se_nacl)) | 477 | if (IS_ERR(se_nacl)) |
511 | return ERR_CAST(se_nacl); | 478 | return ERR_CAST(se_nacl); |
512 | 479 | ||
480 | if (tf->tf_ops->fabric_init_nodeacl) { | ||
481 | int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name); | ||
482 | if (ret) { | ||
483 | core_tpg_del_initiator_node_acl(se_nacl); | ||
484 | return ERR_PTR(ret); | ||
485 | } | ||
486 | } | ||
487 | |||
513 | nacl_cg = &se_nacl->acl_group; | 488 | nacl_cg = &se_nacl->acl_group; |
514 | nacl_cg->default_groups = se_nacl->acl_default_groups; | 489 | nacl_cg->default_groups = se_nacl->acl_default_groups; |
515 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; | 490 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; |
@@ -519,16 +494,15 @@ static struct config_group *target_fabric_make_nodeacl( | |||
519 | nacl_cg->default_groups[4] = NULL; | 494 | nacl_cg->default_groups[4] = NULL; |
520 | 495 | ||
521 | config_group_init_type_name(&se_nacl->acl_group, name, | 496 | config_group_init_type_name(&se_nacl->acl_group, name, |
522 | &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit); | 497 | &tf->tf_tpg_nacl_base_cit); |
523 | config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", | 498 | config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", |
524 | &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit); | 499 | &tf->tf_tpg_nacl_attrib_cit); |
525 | config_group_init_type_name(&se_nacl->acl_auth_group, "auth", | 500 | config_group_init_type_name(&se_nacl->acl_auth_group, "auth", |
526 | &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit); | 501 | &tf->tf_tpg_nacl_auth_cit); |
527 | config_group_init_type_name(&se_nacl->acl_param_group, "param", | 502 | config_group_init_type_name(&se_nacl->acl_param_group, "param", |
528 | &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit); | 503 | &tf->tf_tpg_nacl_param_cit); |
529 | config_group_init_type_name(&se_nacl->acl_fabric_stat_group, | 504 | config_group_init_type_name(&se_nacl->acl_fabric_stat_group, |
530 | "fabric_statistics", | 505 | "fabric_statistics", &tf->tf_tpg_nacl_stat_cit); |
531 | &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit); | ||
532 | 506 | ||
533 | return &se_nacl->acl_group; | 507 | return &se_nacl->acl_group; |
534 | } | 508 | } |
@@ -575,7 +549,7 @@ static void target_fabric_np_base_release(struct config_item *item) | |||
575 | struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; | 549 | struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; |
576 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 550 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
577 | 551 | ||
578 | tf->tf_ops.fabric_drop_np(se_tpg_np); | 552 | tf->tf_ops->fabric_drop_np(se_tpg_np); |
579 | } | 553 | } |
580 | 554 | ||
581 | static struct configfs_item_operations target_fabric_np_base_item_ops = { | 555 | static struct configfs_item_operations target_fabric_np_base_item_ops = { |
@@ -599,18 +573,18 @@ static struct config_group *target_fabric_make_np( | |||
599 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 573 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
600 | struct se_tpg_np *se_tpg_np; | 574 | struct se_tpg_np *se_tpg_np; |
601 | 575 | ||
602 | if (!tf->tf_ops.fabric_make_np) { | 576 | if (!tf->tf_ops->fabric_make_np) { |
603 | pr_err("tf->tf_ops.fabric_make_np is NULL\n"); | 577 | pr_err("tf->tf_ops.fabric_make_np is NULL\n"); |
604 | return ERR_PTR(-ENOSYS); | 578 | return ERR_PTR(-ENOSYS); |
605 | } | 579 | } |
606 | 580 | ||
607 | se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); | 581 | se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name); |
608 | if (!se_tpg_np || IS_ERR(se_tpg_np)) | 582 | if (!se_tpg_np || IS_ERR(se_tpg_np)) |
609 | return ERR_PTR(-EINVAL); | 583 | return ERR_PTR(-EINVAL); |
610 | 584 | ||
611 | se_tpg_np->tpg_np_parent = se_tpg; | 585 | se_tpg_np->tpg_np_parent = se_tpg; |
612 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, | 586 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, |
613 | &tf->tf_cit_tmpl.tfc_tpg_np_base_cit); | 587 | &tf->tf_tpg_np_base_cit); |
614 | 588 | ||
615 | return &se_tpg_np->tpg_np_group; | 589 | return &se_tpg_np->tpg_np_group; |
616 | } | 590 | } |
@@ -654,10 +628,10 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( | |||
654 | struct se_lun *lun, | 628 | struct se_lun *lun, |
655 | char *page) | 629 | char *page) |
656 | { | 630 | { |
657 | if (!lun || !lun->lun_sep) | 631 | if (!lun || !lun->lun_se_dev) |
658 | return -ENODEV; | 632 | return -ENODEV; |
659 | 633 | ||
660 | return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); | 634 | return core_alua_show_tg_pt_gp_info(lun, page); |
661 | } | 635 | } |
662 | 636 | ||
663 | static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( | 637 | static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( |
@@ -665,10 +639,10 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( | |||
665 | const char *page, | 639 | const char *page, |
666 | size_t count) | 640 | size_t count) |
667 | { | 641 | { |
668 | if (!lun || !lun->lun_sep) | 642 | if (!lun || !lun->lun_se_dev) |
669 | return -ENODEV; | 643 | return -ENODEV; |
670 | 644 | ||
671 | return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); | 645 | return core_alua_store_tg_pt_gp_info(lun, page, count); |
672 | } | 646 | } |
673 | 647 | ||
674 | TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); | 648 | TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); |
@@ -680,7 +654,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( | |||
680 | struct se_lun *lun, | 654 | struct se_lun *lun, |
681 | char *page) | 655 | char *page) |
682 | { | 656 | { |
683 | if (!lun || !lun->lun_sep) | 657 | if (!lun || !lun->lun_se_dev) |
684 | return -ENODEV; | 658 | return -ENODEV; |
685 | 659 | ||
686 | return core_alua_show_offline_bit(lun, page); | 660 | return core_alua_show_offline_bit(lun, page); |
@@ -691,7 +665,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( | |||
691 | const char *page, | 665 | const char *page, |
692 | size_t count) | 666 | size_t count) |
693 | { | 667 | { |
694 | if (!lun || !lun->lun_sep) | 668 | if (!lun || !lun->lun_se_dev) |
695 | return -ENODEV; | 669 | return -ENODEV; |
696 | 670 | ||
697 | return core_alua_store_offline_bit(lun, page, count); | 671 | return core_alua_store_offline_bit(lun, page, count); |
@@ -706,7 +680,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( | |||
706 | struct se_lun *lun, | 680 | struct se_lun *lun, |
707 | char *page) | 681 | char *page) |
708 | { | 682 | { |
709 | if (!lun || !lun->lun_sep) | 683 | if (!lun || !lun->lun_se_dev) |
710 | return -ENODEV; | 684 | return -ENODEV; |
711 | 685 | ||
712 | return core_alua_show_secondary_status(lun, page); | 686 | return core_alua_show_secondary_status(lun, page); |
@@ -717,7 +691,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( | |||
717 | const char *page, | 691 | const char *page, |
718 | size_t count) | 692 | size_t count) |
719 | { | 693 | { |
720 | if (!lun || !lun->lun_sep) | 694 | if (!lun || !lun->lun_se_dev) |
721 | return -ENODEV; | 695 | return -ENODEV; |
722 | 696 | ||
723 | return core_alua_store_secondary_status(lun, page, count); | 697 | return core_alua_store_secondary_status(lun, page, count); |
@@ -732,7 +706,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( | |||
732 | struct se_lun *lun, | 706 | struct se_lun *lun, |
733 | char *page) | 707 | char *page) |
734 | { | 708 | { |
735 | if (!lun || !lun->lun_sep) | 709 | if (!lun || !lun->lun_se_dev) |
736 | return -ENODEV; | 710 | return -ENODEV; |
737 | 711 | ||
738 | return core_alua_show_secondary_write_metadata(lun, page); | 712 | return core_alua_show_secondary_write_metadata(lun, page); |
@@ -743,7 +717,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( | |||
743 | const char *page, | 717 | const char *page, |
744 | size_t count) | 718 | size_t count) |
745 | { | 719 | { |
746 | if (!lun || !lun->lun_sep) | 720 | if (!lun || !lun->lun_se_dev) |
747 | return -ENODEV; | 721 | return -ENODEV; |
748 | 722 | ||
749 | return core_alua_store_secondary_write_metadata(lun, page, count); | 723 | return core_alua_store_secondary_write_metadata(lun, page, count); |
@@ -769,7 +743,6 @@ static int target_fabric_port_link( | |||
769 | struct config_item *tpg_ci; | 743 | struct config_item *tpg_ci; |
770 | struct se_lun *lun = container_of(to_config_group(lun_ci), | 744 | struct se_lun *lun = container_of(to_config_group(lun_ci), |
771 | struct se_lun, lun_group); | 745 | struct se_lun, lun_group); |
772 | struct se_lun *lun_p; | ||
773 | struct se_portal_group *se_tpg; | 746 | struct se_portal_group *se_tpg; |
774 | struct se_device *dev = | 747 | struct se_device *dev = |
775 | container_of(to_config_group(se_dev_ci), struct se_device, dev_group); | 748 | container_of(to_config_group(se_dev_ci), struct se_device, dev_group); |
@@ -797,20 +770,19 @@ static int target_fabric_port_link( | |||
797 | return -EEXIST; | 770 | return -EEXIST; |
798 | } | 771 | } |
799 | 772 | ||
800 | lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun); | 773 | ret = core_dev_add_lun(se_tpg, dev, lun); |
801 | if (IS_ERR(lun_p)) { | 774 | if (ret) { |
802 | pr_err("core_dev_add_lun() failed\n"); | 775 | pr_err("core_dev_add_lun() failed: %d\n", ret); |
803 | ret = PTR_ERR(lun_p); | ||
804 | goto out; | 776 | goto out; |
805 | } | 777 | } |
806 | 778 | ||
807 | if (tf->tf_ops.fabric_post_link) { | 779 | if (tf->tf_ops->fabric_post_link) { |
808 | /* | 780 | /* |
809 | * Call the optional fabric_post_link() to allow a | 781 | * Call the optional fabric_post_link() to allow a |
810 | * fabric module to setup any additional state once | 782 | * fabric module to setup any additional state once |
811 | * core_dev_add_lun() has been called.. | 783 | * core_dev_add_lun() has been called.. |
812 | */ | 784 | */ |
813 | tf->tf_ops.fabric_post_link(se_tpg, lun); | 785 | tf->tf_ops->fabric_post_link(se_tpg, lun); |
814 | } | 786 | } |
815 | 787 | ||
816 | return 0; | 788 | return 0; |
@@ -824,25 +796,34 @@ static int target_fabric_port_unlink( | |||
824 | { | 796 | { |
825 | struct se_lun *lun = container_of(to_config_group(lun_ci), | 797 | struct se_lun *lun = container_of(to_config_group(lun_ci), |
826 | struct se_lun, lun_group); | 798 | struct se_lun, lun_group); |
827 | struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; | 799 | struct se_portal_group *se_tpg = lun->lun_tpg; |
828 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 800 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
829 | 801 | ||
830 | if (tf->tf_ops.fabric_pre_unlink) { | 802 | if (tf->tf_ops->fabric_pre_unlink) { |
831 | /* | 803 | /* |
832 | * Call the optional fabric_pre_unlink() to allow a | 804 | * Call the optional fabric_pre_unlink() to allow a |
833 | * fabric module to release any additional stat before | 805 | * fabric module to release any additional stat before |
834 | * core_dev_del_lun() is called. | 806 | * core_dev_del_lun() is called. |
835 | */ | 807 | */ |
836 | tf->tf_ops.fabric_pre_unlink(se_tpg, lun); | 808 | tf->tf_ops->fabric_pre_unlink(se_tpg, lun); |
837 | } | 809 | } |
838 | 810 | ||
839 | core_dev_del_lun(se_tpg, lun); | 811 | core_dev_del_lun(se_tpg, lun); |
840 | return 0; | 812 | return 0; |
841 | } | 813 | } |
842 | 814 | ||
815 | static void target_fabric_port_release(struct config_item *item) | ||
816 | { | ||
817 | struct se_lun *lun = container_of(to_config_group(item), | ||
818 | struct se_lun, lun_group); | ||
819 | |||
820 | kfree_rcu(lun, rcu_head); | ||
821 | } | ||
822 | |||
843 | static struct configfs_item_operations target_fabric_port_item_ops = { | 823 | static struct configfs_item_operations target_fabric_port_item_ops = { |
844 | .show_attribute = target_fabric_port_attr_show, | 824 | .show_attribute = target_fabric_port_attr_show, |
845 | .store_attribute = target_fabric_port_attr_store, | 825 | .store_attribute = target_fabric_port_attr_store, |
826 | .release = target_fabric_port_release, | ||
846 | .allow_link = target_fabric_port_link, | 827 | .allow_link = target_fabric_port_link, |
847 | .drop_link = target_fabric_port_unlink, | 828 | .drop_link = target_fabric_port_unlink, |
848 | }; | 829 | }; |
@@ -887,7 +868,7 @@ static struct config_group *target_fabric_make_lun( | |||
887 | struct se_portal_group, tpg_lun_group); | 868 | struct se_portal_group, tpg_lun_group); |
888 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 869 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
889 | struct config_group *lun_cg = NULL, *port_stat_grp = NULL; | 870 | struct config_group *lun_cg = NULL, *port_stat_grp = NULL; |
890 | unsigned long unpacked_lun; | 871 | unsigned long long unpacked_lun; |
891 | int errno; | 872 | int errno; |
892 | 873 | ||
893 | if (strstr(name, "lun_") != name) { | 874 | if (strstr(name, "lun_") != name) { |
@@ -895,28 +876,27 @@ static struct config_group *target_fabric_make_lun( | |||
895 | " \"lun_$LUN_NUMBER\"\n"); | 876 | " \"lun_$LUN_NUMBER\"\n"); |
896 | return ERR_PTR(-EINVAL); | 877 | return ERR_PTR(-EINVAL); |
897 | } | 878 | } |
898 | errno = kstrtoul(name + 4, 0, &unpacked_lun); | 879 | errno = kstrtoull(name + 4, 0, &unpacked_lun); |
899 | if (errno) | 880 | if (errno) |
900 | return ERR_PTR(errno); | 881 | return ERR_PTR(errno); |
901 | if (unpacked_lun > UINT_MAX) | ||
902 | return ERR_PTR(-EINVAL); | ||
903 | 882 | ||
904 | lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); | 883 | lun = core_tpg_alloc_lun(se_tpg, unpacked_lun); |
905 | if (!lun) | 884 | if (IS_ERR(lun)) |
906 | return ERR_PTR(-EINVAL); | 885 | return ERR_CAST(lun); |
907 | 886 | ||
908 | lun_cg = &lun->lun_group; | 887 | lun_cg = &lun->lun_group; |
909 | lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, | 888 | lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, |
910 | GFP_KERNEL); | 889 | GFP_KERNEL); |
911 | if (!lun_cg->default_groups) { | 890 | if (!lun_cg->default_groups) { |
912 | pr_err("Unable to allocate lun_cg->default_groups\n"); | 891 | pr_err("Unable to allocate lun_cg->default_groups\n"); |
892 | kfree(lun); | ||
913 | return ERR_PTR(-ENOMEM); | 893 | return ERR_PTR(-ENOMEM); |
914 | } | 894 | } |
915 | 895 | ||
916 | config_group_init_type_name(&lun->lun_group, name, | 896 | config_group_init_type_name(&lun->lun_group, name, |
917 | &tf->tf_cit_tmpl.tfc_tpg_port_cit); | 897 | &tf->tf_tpg_port_cit); |
918 | config_group_init_type_name(&lun->port_stat_grps.stat_group, | 898 | config_group_init_type_name(&lun->port_stat_grps.stat_group, |
919 | "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit); | 899 | "statistics", &tf->tf_tpg_port_stat_cit); |
920 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; | 900 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; |
921 | lun_cg->default_groups[1] = NULL; | 901 | lun_cg->default_groups[1] = NULL; |
922 | 902 | ||
@@ -926,6 +906,7 @@ static struct config_group *target_fabric_make_lun( | |||
926 | if (!port_stat_grp->default_groups) { | 906 | if (!port_stat_grp->default_groups) { |
927 | pr_err("Unable to allocate port_stat_grp->default_groups\n"); | 907 | pr_err("Unable to allocate port_stat_grp->default_groups\n"); |
928 | kfree(lun_cg->default_groups); | 908 | kfree(lun_cg->default_groups); |
909 | kfree(lun); | ||
929 | return ERR_PTR(-ENOMEM); | 910 | return ERR_PTR(-ENOMEM); |
930 | } | 911 | } |
931 | target_stat_setup_port_default_groups(lun); | 912 | target_stat_setup_port_default_groups(lun); |
@@ -1023,7 +1004,7 @@ static void target_fabric_tpg_release(struct config_item *item) | |||
1023 | struct se_wwn *wwn = se_tpg->se_tpg_wwn; | 1004 | struct se_wwn *wwn = se_tpg->se_tpg_wwn; |
1024 | struct target_fabric_configfs *tf = wwn->wwn_tf; | 1005 | struct target_fabric_configfs *tf = wwn->wwn_tf; |
1025 | 1006 | ||
1026 | tf->tf_ops.fabric_drop_tpg(se_tpg); | 1007 | tf->tf_ops->fabric_drop_tpg(se_tpg); |
1027 | } | 1008 | } |
1028 | 1009 | ||
1029 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { | 1010 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { |
@@ -1046,12 +1027,12 @@ static struct config_group *target_fabric_make_tpg( | |||
1046 | struct target_fabric_configfs *tf = wwn->wwn_tf; | 1027 | struct target_fabric_configfs *tf = wwn->wwn_tf; |
1047 | struct se_portal_group *se_tpg; | 1028 | struct se_portal_group *se_tpg; |
1048 | 1029 | ||
1049 | if (!tf->tf_ops.fabric_make_tpg) { | 1030 | if (!tf->tf_ops->fabric_make_tpg) { |
1050 | pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); | 1031 | pr_err("tf->tf_ops->fabric_make_tpg is NULL\n"); |
1051 | return ERR_PTR(-ENOSYS); | 1032 | return ERR_PTR(-ENOSYS); |
1052 | } | 1033 | } |
1053 | 1034 | ||
1054 | se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); | 1035 | se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name); |
1055 | if (!se_tpg || IS_ERR(se_tpg)) | 1036 | if (!se_tpg || IS_ERR(se_tpg)) |
1056 | return ERR_PTR(-EINVAL); | 1037 | return ERR_PTR(-EINVAL); |
1057 | /* | 1038 | /* |
@@ -1067,19 +1048,19 @@ static struct config_group *target_fabric_make_tpg( | |||
1067 | se_tpg->tpg_group.default_groups[6] = NULL; | 1048 | se_tpg->tpg_group.default_groups[6] = NULL; |
1068 | 1049 | ||
1069 | config_group_init_type_name(&se_tpg->tpg_group, name, | 1050 | config_group_init_type_name(&se_tpg->tpg_group, name, |
1070 | &tf->tf_cit_tmpl.tfc_tpg_base_cit); | 1051 | &tf->tf_tpg_base_cit); |
1071 | config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", | 1052 | config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", |
1072 | &tf->tf_cit_tmpl.tfc_tpg_lun_cit); | 1053 | &tf->tf_tpg_lun_cit); |
1073 | config_group_init_type_name(&se_tpg->tpg_np_group, "np", | 1054 | config_group_init_type_name(&se_tpg->tpg_np_group, "np", |
1074 | &tf->tf_cit_tmpl.tfc_tpg_np_cit); | 1055 | &tf->tf_tpg_np_cit); |
1075 | config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", | 1056 | config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", |
1076 | &tf->tf_cit_tmpl.tfc_tpg_nacl_cit); | 1057 | &tf->tf_tpg_nacl_cit); |
1077 | config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", | 1058 | config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", |
1078 | &tf->tf_cit_tmpl.tfc_tpg_attrib_cit); | 1059 | &tf->tf_tpg_attrib_cit); |
1079 | config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", | 1060 | config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", |
1080 | &tf->tf_cit_tmpl.tfc_tpg_auth_cit); | 1061 | &tf->tf_tpg_auth_cit); |
1081 | config_group_init_type_name(&se_tpg->tpg_param_group, "param", | 1062 | config_group_init_type_name(&se_tpg->tpg_param_group, "param", |
1082 | &tf->tf_cit_tmpl.tfc_tpg_param_cit); | 1063 | &tf->tf_tpg_param_cit); |
1083 | 1064 | ||
1084 | return &se_tpg->tpg_group; | 1065 | return &se_tpg->tpg_group; |
1085 | } | 1066 | } |
@@ -1112,7 +1093,7 @@ static void target_fabric_release_wwn(struct config_item *item) | |||
1112 | struct se_wwn, wwn_group); | 1093 | struct se_wwn, wwn_group); |
1113 | struct target_fabric_configfs *tf = wwn->wwn_tf; | 1094 | struct target_fabric_configfs *tf = wwn->wwn_tf; |
1114 | 1095 | ||
1115 | tf->tf_ops.fabric_drop_wwn(wwn); | 1096 | tf->tf_ops->fabric_drop_wwn(wwn); |
1116 | } | 1097 | } |
1117 | 1098 | ||
1118 | static struct configfs_item_operations target_fabric_tpg_item_ops = { | 1099 | static struct configfs_item_operations target_fabric_tpg_item_ops = { |
@@ -1148,12 +1129,12 @@ static struct config_group *target_fabric_make_wwn( | |||
1148 | struct target_fabric_configfs, tf_group); | 1129 | struct target_fabric_configfs, tf_group); |
1149 | struct se_wwn *wwn; | 1130 | struct se_wwn *wwn; |
1150 | 1131 | ||
1151 | if (!tf->tf_ops.fabric_make_wwn) { | 1132 | if (!tf->tf_ops->fabric_make_wwn) { |
1152 | pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); | 1133 | pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); |
1153 | return ERR_PTR(-ENOSYS); | 1134 | return ERR_PTR(-ENOSYS); |
1154 | } | 1135 | } |
1155 | 1136 | ||
1156 | wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); | 1137 | wwn = tf->tf_ops->fabric_make_wwn(tf, group, name); |
1157 | if (!wwn || IS_ERR(wwn)) | 1138 | if (!wwn || IS_ERR(wwn)) |
1158 | return ERR_PTR(-EINVAL); | 1139 | return ERR_PTR(-EINVAL); |
1159 | 1140 | ||
@@ -1165,10 +1146,9 @@ static struct config_group *target_fabric_make_wwn( | |||
1165 | wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; | 1146 | wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; |
1166 | wwn->wwn_group.default_groups[1] = NULL; | 1147 | wwn->wwn_group.default_groups[1] = NULL; |
1167 | 1148 | ||
1168 | config_group_init_type_name(&wwn->wwn_group, name, | 1149 | config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit); |
1169 | &tf->tf_cit_tmpl.tfc_tpg_cit); | ||
1170 | config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", | 1150 | config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", |
1171 | &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit); | 1151 | &tf->tf_wwn_fabric_stats_cit); |
1172 | 1152 | ||
1173 | return &wwn->wwn_group; | 1153 | return &wwn->wwn_group; |
1174 | } | 1154 | } |
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 41f4f270f919..cb6497ce4b61 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c | |||
@@ -24,6 +24,11 @@ | |||
24 | * | 24 | * |
25 | ******************************************************************************/ | 25 | ******************************************************************************/ |
26 | 26 | ||
27 | /* | ||
28 | * See SPC4, section 7.5 "Protocol specific parameters" for details | ||
29 | * on the formats implemented in this file. | ||
30 | */ | ||
31 | |||
27 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
28 | #include <linux/string.h> | 33 | #include <linux/string.h> |
29 | #include <linux/ctype.h> | 34 | #include <linux/ctype.h> |
@@ -34,124 +39,30 @@ | |||
34 | 39 | ||
35 | #include <target/target_core_base.h> | 40 | #include <target/target_core_base.h> |
36 | #include <target/target_core_fabric.h> | 41 | #include <target/target_core_fabric.h> |
37 | #include <target/target_core_configfs.h> | ||
38 | 42 | ||
39 | #include "target_core_internal.h" | 43 | #include "target_core_internal.h" |
40 | #include "target_core_pr.h" | 44 | #include "target_core_pr.h" |
41 | 45 | ||
42 | /* | ||
43 | * Handlers for Serial Attached SCSI (SAS) | ||
44 | */ | ||
45 | u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
46 | { | ||
47 | /* | ||
48 | * Return a SAS Serial SCSI Protocol identifier for loopback operations | ||
49 | * This is defined in section 7.5.1 Table 362 in spc4r17 | ||
50 | */ | ||
51 | return 0x6; | ||
52 | } | ||
53 | EXPORT_SYMBOL(sas_get_fabric_proto_ident); | ||
54 | 46 | ||
55 | u32 sas_get_pr_transport_id( | 47 | static int sas_get_pr_transport_id( |
56 | struct se_portal_group *se_tpg, | 48 | struct se_node_acl *nacl, |
57 | struct se_node_acl *se_nacl, | ||
58 | struct t10_pr_registration *pr_reg, | ||
59 | int *format_code, | 49 | int *format_code, |
60 | unsigned char *buf) | 50 | unsigned char *buf) |
61 | { | 51 | { |
62 | unsigned char *ptr; | ||
63 | int ret; | 52 | int ret; |
64 | 53 | ||
65 | /* | 54 | /* Skip over 'naa. prefix */ |
66 | * Set PROTOCOL IDENTIFIER to 6h for SAS | 55 | ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8); |
67 | */ | 56 | if (ret) { |
68 | buf[0] = 0x06; | 57 | pr_debug("%s: invalid hex string\n", __func__); |
69 | /* | 58 | return ret; |
70 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI | 59 | } |
71 | * over SAS Serial SCSI Protocol | ||
72 | */ | ||
73 | ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */ | ||
74 | |||
75 | ret = hex2bin(&buf[4], ptr, 8); | ||
76 | if (ret < 0) | ||
77 | pr_debug("sas transport_id: invalid hex string\n"); | ||
78 | |||
79 | /* | ||
80 | * The SAS Transport ID is a hardcoded 24-byte length | ||
81 | */ | ||
82 | return 24; | ||
83 | } | ||
84 | EXPORT_SYMBOL(sas_get_pr_transport_id); | ||
85 | |||
86 | u32 sas_get_pr_transport_id_len( | ||
87 | struct se_portal_group *se_tpg, | ||
88 | struct se_node_acl *se_nacl, | ||
89 | struct t10_pr_registration *pr_reg, | ||
90 | int *format_code) | ||
91 | { | ||
92 | *format_code = 0; | ||
93 | /* | ||
94 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI | ||
95 | * over SAS Serial SCSI Protocol | ||
96 | * | ||
97 | * The SAS Transport ID is a hardcoded 24-byte length | ||
98 | */ | ||
99 | return 24; | ||
100 | } | ||
101 | EXPORT_SYMBOL(sas_get_pr_transport_id_len); | ||
102 | |||
103 | /* | ||
104 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | ||
105 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | ||
106 | */ | ||
107 | char *sas_parse_pr_out_transport_id( | ||
108 | struct se_portal_group *se_tpg, | ||
109 | const char *buf, | ||
110 | u32 *out_tid_len, | ||
111 | char **port_nexus_ptr) | ||
112 | { | ||
113 | /* | ||
114 | * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID | ||
115 | * for initiator ports using SCSI over SAS Serial SCSI Protocol | ||
116 | * | ||
117 | * The TransportID for a SAS Initiator Port is of fixed size of | ||
118 | * 24 bytes, and SAS does not contain a I_T nexus identifier, | ||
119 | * so we return the **port_nexus_ptr set to NULL. | ||
120 | */ | ||
121 | *port_nexus_ptr = NULL; | ||
122 | *out_tid_len = 24; | ||
123 | |||
124 | return (char *)&buf[4]; | ||
125 | } | ||
126 | EXPORT_SYMBOL(sas_parse_pr_out_transport_id); | ||
127 | |||
128 | /* | ||
129 | * Handlers for Fibre Channel Protocol (FCP) | ||
130 | */ | ||
131 | u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
132 | { | ||
133 | return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ | ||
134 | } | ||
135 | EXPORT_SYMBOL(fc_get_fabric_proto_ident); | ||
136 | 60 | ||
137 | u32 fc_get_pr_transport_id_len( | ||
138 | struct se_portal_group *se_tpg, | ||
139 | struct se_node_acl *se_nacl, | ||
140 | struct t10_pr_registration *pr_reg, | ||
141 | int *format_code) | ||
142 | { | ||
143 | *format_code = 0; | ||
144 | /* | ||
145 | * The FC Transport ID is a hardcoded 24-byte length | ||
146 | */ | ||
147 | return 24; | 61 | return 24; |
148 | } | 62 | } |
149 | EXPORT_SYMBOL(fc_get_pr_transport_id_len); | ||
150 | 63 | ||
151 | u32 fc_get_pr_transport_id( | 64 | static int fc_get_pr_transport_id( |
152 | struct se_portal_group *se_tpg, | ||
153 | struct se_node_acl *se_nacl, | 65 | struct se_node_acl *se_nacl, |
154 | struct t10_pr_registration *pr_reg, | ||
155 | int *format_code, | 66 | int *format_code, |
156 | unsigned char *buf) | 67 | unsigned char *buf) |
157 | { | 68 | { |
@@ -160,24 +71,20 @@ u32 fc_get_pr_transport_id( | |||
160 | u32 off = 8; | 71 | u32 off = 8; |
161 | 72 | ||
162 | /* | 73 | /* |
163 | * PROTOCOL IDENTIFIER is 0h for FCP-2 | ||
164 | * | ||
165 | * From spc4r17, 7.5.4.2 TransportID for initiator ports using | ||
166 | * SCSI over Fibre Channel | ||
167 | * | ||
168 | * We convert the ASCII formatted N Port name into a binary | 74 | * We convert the ASCII formatted N Port name into a binary |
169 | * encoded TransportID. | 75 | * encoded TransportID. |
170 | */ | 76 | */ |
171 | ptr = &se_nacl->initiatorname[0]; | 77 | ptr = &se_nacl->initiatorname[0]; |
172 | |||
173 | for (i = 0; i < 24; ) { | 78 | for (i = 0; i < 24; ) { |
174 | if (!strncmp(&ptr[i], ":", 1)) { | 79 | if (!strncmp(&ptr[i], ":", 1)) { |
175 | i++; | 80 | i++; |
176 | continue; | 81 | continue; |
177 | } | 82 | } |
178 | ret = hex2bin(&buf[off++], &ptr[i], 1); | 83 | ret = hex2bin(&buf[off++], &ptr[i], 1); |
179 | if (ret < 0) | 84 | if (ret < 0) { |
180 | pr_debug("fc transport_id: invalid hex string\n"); | 85 | pr_debug("%s: invalid hex string\n", __func__); |
86 | return ret; | ||
87 | } | ||
181 | i += 2; | 88 | i += 2; |
182 | } | 89 | } |
183 | /* | 90 | /* |
@@ -185,42 +92,52 @@ u32 fc_get_pr_transport_id( | |||
185 | */ | 92 | */ |
186 | return 24; | 93 | return 24; |
187 | } | 94 | } |
188 | EXPORT_SYMBOL(fc_get_pr_transport_id); | ||
189 | 95 | ||
190 | char *fc_parse_pr_out_transport_id( | 96 | static int sbp_get_pr_transport_id( |
191 | struct se_portal_group *se_tpg, | 97 | struct se_node_acl *nacl, |
192 | const char *buf, | 98 | int *format_code, |
193 | u32 *out_tid_len, | 99 | unsigned char *buf) |
194 | char **port_nexus_ptr) | ||
195 | { | 100 | { |
196 | /* | 101 | int ret; |
197 | * The TransportID for a FC N Port is of fixed size of | ||
198 | * 24 bytes, and FC does not contain a I_T nexus identifier, | ||
199 | * so we return the **port_nexus_ptr set to NULL. | ||
200 | */ | ||
201 | *port_nexus_ptr = NULL; | ||
202 | *out_tid_len = 24; | ||
203 | 102 | ||
204 | return (char *)&buf[8]; | 103 | ret = hex2bin(&buf[8], nacl->initiatorname, 8); |
205 | } | 104 | if (ret) { |
206 | EXPORT_SYMBOL(fc_parse_pr_out_transport_id); | 105 | pr_debug("%s: invalid hex string\n", __func__); |
106 | return ret; | ||
107 | } | ||
207 | 108 | ||
208 | /* | 109 | return 24; |
209 | * Handlers for Internet Small Computer Systems Interface (iSCSI) | 110 | } |
210 | */ | ||
211 | 111 | ||
212 | u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) | 112 | static int srp_get_pr_transport_id( |
113 | struct se_node_acl *nacl, | ||
114 | int *format_code, | ||
115 | unsigned char *buf) | ||
213 | { | 116 | { |
214 | /* | 117 | const char *p; |
215 | * This value is defined for "Internet SCSI (iSCSI)" | 118 | unsigned len, count, leading_zero_bytes; |
216 | * in spc4r17 section 7.5.1 Table 362 | 119 | int rc; |
217 | */ | 120 | |
218 | return 0x5; | 121 | p = nacl->initiatorname; |
122 | if (strncasecmp(p, "0x", 2) == 0) | ||
123 | p += 2; | ||
124 | len = strlen(p); | ||
125 | if (len % 2) | ||
126 | return -EINVAL; | ||
127 | |||
128 | count = min(len / 2, 16U); | ||
129 | leading_zero_bytes = 16 - count; | ||
130 | memset(buf + 8, 0, leading_zero_bytes); | ||
131 | rc = hex2bin(buf + 8 + leading_zero_bytes, p, count); | ||
132 | if (rc < 0) { | ||
133 | pr_debug("hex2bin failed for %s: %d\n", __func__, rc); | ||
134 | return rc; | ||
135 | } | ||
136 | |||
137 | return 24; | ||
219 | } | 138 | } |
220 | EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); | ||
221 | 139 | ||
222 | u32 iscsi_get_pr_transport_id( | 140 | static int iscsi_get_pr_transport_id( |
223 | struct se_portal_group *se_tpg, | ||
224 | struct se_node_acl *se_nacl, | 141 | struct se_node_acl *se_nacl, |
225 | struct t10_pr_registration *pr_reg, | 142 | struct t10_pr_registration *pr_reg, |
226 | int *format_code, | 143 | int *format_code, |
@@ -231,10 +148,6 @@ u32 iscsi_get_pr_transport_id( | |||
231 | 148 | ||
232 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 149 | spin_lock_irq(&se_nacl->nacl_sess_lock); |
233 | /* | 150 | /* |
234 | * Set PROTOCOL IDENTIFIER to 5h for iSCSI | ||
235 | */ | ||
236 | buf[0] = 0x05; | ||
237 | /* | ||
238 | * From spc4r17 Section 7.5.4.6: TransportID for initiator | 151 | * From spc4r17 Section 7.5.4.6: TransportID for initiator |
239 | * ports using SCSI over iSCSI. | 152 | * ports using SCSI over iSCSI. |
240 | * | 153 | * |
@@ -313,10 +226,8 @@ u32 iscsi_get_pr_transport_id( | |||
313 | 226 | ||
314 | return len; | 227 | return len; |
315 | } | 228 | } |
316 | EXPORT_SYMBOL(iscsi_get_pr_transport_id); | ||
317 | 229 | ||
318 | u32 iscsi_get_pr_transport_id_len( | 230 | static int iscsi_get_pr_transport_id_len( |
319 | struct se_portal_group *se_tpg, | ||
320 | struct se_node_acl *se_nacl, | 231 | struct se_node_acl *se_nacl, |
321 | struct t10_pr_registration *pr_reg, | 232 | struct t10_pr_registration *pr_reg, |
322 | int *format_code) | 233 | int *format_code) |
@@ -359,9 +270,8 @@ u32 iscsi_get_pr_transport_id_len( | |||
359 | 270 | ||
360 | return len; | 271 | return len; |
361 | } | 272 | } |
362 | EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); | ||
363 | 273 | ||
364 | char *iscsi_parse_pr_out_transport_id( | 274 | static char *iscsi_parse_pr_out_transport_id( |
365 | struct se_portal_group *se_tpg, | 275 | struct se_portal_group *se_tpg, |
366 | const char *buf, | 276 | const char *buf, |
367 | u32 *out_tid_len, | 277 | u32 *out_tid_len, |
@@ -448,4 +358,79 @@ char *iscsi_parse_pr_out_transport_id( | |||
448 | 358 | ||
449 | return (char *)&buf[4]; | 359 | return (char *)&buf[4]; |
450 | } | 360 | } |
451 | EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id); | 361 | |
362 | int target_get_pr_transport_id_len(struct se_node_acl *nacl, | ||
363 | struct t10_pr_registration *pr_reg, int *format_code) | ||
364 | { | ||
365 | switch (nacl->se_tpg->proto_id) { | ||
366 | case SCSI_PROTOCOL_FCP: | ||
367 | case SCSI_PROTOCOL_SBP: | ||
368 | case SCSI_PROTOCOL_SRP: | ||
369 | case SCSI_PROTOCOL_SAS: | ||
370 | break; | ||
371 | case SCSI_PROTOCOL_ISCSI: | ||
372 | return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code); | ||
373 | default: | ||
374 | pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id); | ||
375 | return -EINVAL; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Most transports use a fixed length 24 byte identifier. | ||
380 | */ | ||
381 | *format_code = 0; | ||
382 | return 24; | ||
383 | } | ||
384 | |||
385 | int target_get_pr_transport_id(struct se_node_acl *nacl, | ||
386 | struct t10_pr_registration *pr_reg, int *format_code, | ||
387 | unsigned char *buf) | ||
388 | { | ||
389 | switch (nacl->se_tpg->proto_id) { | ||
390 | case SCSI_PROTOCOL_SAS: | ||
391 | return sas_get_pr_transport_id(nacl, format_code, buf); | ||
392 | case SCSI_PROTOCOL_SBP: | ||
393 | return sbp_get_pr_transport_id(nacl, format_code, buf); | ||
394 | case SCSI_PROTOCOL_SRP: | ||
395 | return srp_get_pr_transport_id(nacl, format_code, buf); | ||
396 | case SCSI_PROTOCOL_FCP: | ||
397 | return fc_get_pr_transport_id(nacl, format_code, buf); | ||
398 | case SCSI_PROTOCOL_ISCSI: | ||
399 | return iscsi_get_pr_transport_id(nacl, pr_reg, format_code, | ||
400 | buf); | ||
401 | default: | ||
402 | pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id); | ||
403 | return -EINVAL; | ||
404 | } | ||
405 | } | ||
406 | |||
407 | const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, | ||
408 | const char *buf, u32 *out_tid_len, char **port_nexus_ptr) | ||
409 | { | ||
410 | u32 offset; | ||
411 | |||
412 | switch (tpg->proto_id) { | ||
413 | case SCSI_PROTOCOL_SAS: | ||
414 | /* | ||
415 | * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID | ||
416 | * for initiator ports using SCSI over SAS Serial SCSI Protocol. | ||
417 | */ | ||
418 | offset = 4; | ||
419 | break; | ||
420 | case SCSI_PROTOCOL_SBP: | ||
421 | case SCSI_PROTOCOL_SRP: | ||
422 | case SCSI_PROTOCOL_FCP: | ||
423 | offset = 8; | ||
424 | break; | ||
425 | case SCSI_PROTOCOL_ISCSI: | ||
426 | return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len, | ||
427 | port_nexus_ptr); | ||
428 | default: | ||
429 | pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id); | ||
430 | return NULL; | ||
431 | } | ||
432 | |||
433 | *port_nexus_ptr = NULL; | ||
434 | *out_tid_len = 24; | ||
435 | return buf + offset; | ||
436 | } | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 664171353289..e3195700211a 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -37,7 +37,6 @@ | |||
37 | 37 | ||
38 | #include <target/target_core_base.h> | 38 | #include <target/target_core_base.h> |
39 | #include <target/target_core_backend.h> | 39 | #include <target/target_core_backend.h> |
40 | #include <target/target_core_backend_configfs.h> | ||
41 | 40 | ||
42 | #include "target_core_file.h" | 41 | #include "target_core_file.h" |
43 | 42 | ||
@@ -46,10 +45,6 @@ static inline struct fd_dev *FD_DEV(struct se_device *dev) | |||
46 | return container_of(dev, struct fd_dev, dev); | 45 | return container_of(dev, struct fd_dev, dev); |
47 | } | 46 | } |
48 | 47 | ||
49 | /* fd_attach_hba(): (Part of se_subsystem_api_t template) | ||
50 | * | ||
51 | * | ||
52 | */ | ||
53 | static int fd_attach_hba(struct se_hba *hba, u32 host_id) | 48 | static int fd_attach_hba(struct se_hba *hba, u32 host_id) |
54 | { | 49 | { |
55 | struct fd_host *fd_host; | 50 | struct fd_host *fd_host; |
@@ -66,7 +61,7 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) | |||
66 | 61 | ||
67 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" | 62 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" |
68 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, | 63 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, |
69 | TARGET_CORE_MOD_VERSION); | 64 | TARGET_CORE_VERSION); |
70 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", | 65 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", |
71 | hba->hba_id, fd_host->fd_host_id); | 66 | hba->hba_id, fd_host->fd_host_id); |
72 | 67 | ||
@@ -246,87 +241,34 @@ fail: | |||
246 | return ret; | 241 | return ret; |
247 | } | 242 | } |
248 | 243 | ||
249 | static void fd_free_device(struct se_device *dev) | 244 | static void fd_dev_call_rcu(struct rcu_head *p) |
250 | { | 245 | { |
246 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
251 | struct fd_dev *fd_dev = FD_DEV(dev); | 247 | struct fd_dev *fd_dev = FD_DEV(dev); |
252 | 248 | ||
253 | if (fd_dev->fd_file) { | ||
254 | filp_close(fd_dev->fd_file, NULL); | ||
255 | fd_dev->fd_file = NULL; | ||
256 | } | ||
257 | |||
258 | kfree(fd_dev); | 249 | kfree(fd_dev); |
259 | } | 250 | } |
260 | 251 | ||
261 | static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, | 252 | static void fd_free_device(struct se_device *dev) |
262 | int is_write) | ||
263 | { | 253 | { |
264 | struct se_device *se_dev = cmd->se_dev; | 254 | struct fd_dev *fd_dev = FD_DEV(dev); |
265 | struct fd_dev *dev = FD_DEV(se_dev); | ||
266 | struct file *prot_fd = dev->fd_prot_file; | ||
267 | loff_t pos = (cmd->t_task_lba * se_dev->prot_length); | ||
268 | unsigned char *buf; | ||
269 | u32 prot_size; | ||
270 | int rc, ret = 1; | ||
271 | |||
272 | prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * | ||
273 | se_dev->prot_length; | ||
274 | |||
275 | if (!is_write) { | ||
276 | fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL); | ||
277 | if (!fd_prot->prot_buf) { | ||
278 | pr_err("Unable to allocate fd_prot->prot_buf\n"); | ||
279 | return -ENOMEM; | ||
280 | } | ||
281 | buf = fd_prot->prot_buf; | ||
282 | |||
283 | fd_prot->prot_sg_nents = 1; | ||
284 | fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist), | ||
285 | GFP_KERNEL); | ||
286 | if (!fd_prot->prot_sg) { | ||
287 | pr_err("Unable to allocate fd_prot->prot_sg\n"); | ||
288 | kfree(fd_prot->prot_buf); | ||
289 | return -ENOMEM; | ||
290 | } | ||
291 | sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents); | ||
292 | sg_set_buf(fd_prot->prot_sg, buf, prot_size); | ||
293 | } | ||
294 | |||
295 | if (is_write) { | ||
296 | rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); | ||
297 | if (rc < 0 || prot_size != rc) { | ||
298 | pr_err("kernel_write() for fd_do_prot_rw failed:" | ||
299 | " %d\n", rc); | ||
300 | ret = -EINVAL; | ||
301 | } | ||
302 | } else { | ||
303 | rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); | ||
304 | if (rc < 0) { | ||
305 | pr_err("kernel_read() for fd_do_prot_rw failed:" | ||
306 | " %d\n", rc); | ||
307 | ret = -EINVAL; | ||
308 | } | ||
309 | } | ||
310 | 255 | ||
311 | if (is_write || ret < 0) { | 256 | if (fd_dev->fd_file) { |
312 | kfree(fd_prot->prot_sg); | 257 | filp_close(fd_dev->fd_file, NULL); |
313 | kfree(fd_prot->prot_buf); | 258 | fd_dev->fd_file = NULL; |
314 | } | 259 | } |
315 | 260 | call_rcu(&dev->rcu_head, fd_dev_call_rcu); | |
316 | return ret; | ||
317 | } | 261 | } |
318 | 262 | ||
319 | static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, | 263 | static int fd_do_rw(struct se_cmd *cmd, struct file *fd, |
320 | u32 sgl_nents, int is_write) | 264 | u32 block_size, struct scatterlist *sgl, |
265 | u32 sgl_nents, u32 data_length, int is_write) | ||
321 | { | 266 | { |
322 | struct se_device *se_dev = cmd->se_dev; | ||
323 | struct fd_dev *dev = FD_DEV(se_dev); | ||
324 | struct file *fd = dev->fd_file; | ||
325 | struct scatterlist *sg; | 267 | struct scatterlist *sg; |
326 | struct iov_iter iter; | 268 | struct iov_iter iter; |
327 | struct bio_vec *bvec; | 269 | struct bio_vec *bvec; |
328 | ssize_t len = 0; | 270 | ssize_t len = 0; |
329 | loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); | 271 | loff_t pos = (cmd->t_task_lba * block_size); |
330 | int ret = 0, i; | 272 | int ret = 0, i; |
331 | 273 | ||
332 | bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); | 274 | bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); |
@@ -352,7 +294,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, | |||
352 | kfree(bvec); | 294 | kfree(bvec); |
353 | 295 | ||
354 | if (is_write) { | 296 | if (is_write) { |
355 | if (ret < 0 || ret != cmd->data_length) { | 297 | if (ret < 0 || ret != data_length) { |
356 | pr_err("%s() write returned %d\n", __func__, ret); | 298 | pr_err("%s() write returned %d\n", __func__, ret); |
357 | return (ret < 0 ? ret : -EINVAL); | 299 | return (ret < 0 ? ret : -EINVAL); |
358 | } | 300 | } |
@@ -363,10 +305,10 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, | |||
363 | * block_device. | 305 | * block_device. |
364 | */ | 306 | */ |
365 | if (S_ISBLK(file_inode(fd)->i_mode)) { | 307 | if (S_ISBLK(file_inode(fd)->i_mode)) { |
366 | if (ret < 0 || ret != cmd->data_length) { | 308 | if (ret < 0 || ret != data_length) { |
367 | pr_err("%s() returned %d, expecting %u for " | 309 | pr_err("%s() returned %d, expecting %u for " |
368 | "S_ISBLK\n", __func__, ret, | 310 | "S_ISBLK\n", __func__, ret, |
369 | cmd->data_length); | 311 | data_length); |
370 | return (ret < 0 ? ret : -EINVAL); | 312 | return (ret < 0 ? ret : -EINVAL); |
371 | } | 313 | } |
372 | } else { | 314 | } else { |
@@ -533,9 +475,9 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) | |||
533 | } | 475 | } |
534 | 476 | ||
535 | static sense_reason_t | 477 | static sense_reason_t |
536 | fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) | 478 | fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) |
537 | { | 479 | { |
538 | struct file *file = priv; | 480 | struct file *file = FD_DEV(cmd->se_dev)->fd_file; |
539 | struct inode *inode = file->f_mapping->host; | 481 | struct inode *inode = file->f_mapping->host; |
540 | int ret; | 482 | int ret; |
541 | 483 | ||
@@ -577,42 +519,13 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) | |||
577 | } | 519 | } |
578 | 520 | ||
579 | static sense_reason_t | 521 | static sense_reason_t |
580 | fd_execute_write_same_unmap(struct se_cmd *cmd) | ||
581 | { | ||
582 | struct se_device *se_dev = cmd->se_dev; | ||
583 | struct fd_dev *fd_dev = FD_DEV(se_dev); | ||
584 | struct file *file = fd_dev->fd_file; | ||
585 | sector_t lba = cmd->t_task_lba; | ||
586 | sector_t nolb = sbc_get_write_same_sectors(cmd); | ||
587 | sense_reason_t ret; | ||
588 | |||
589 | if (!nolb) { | ||
590 | target_complete_cmd(cmd, SAM_STAT_GOOD); | ||
591 | return 0; | ||
592 | } | ||
593 | |||
594 | ret = fd_do_unmap(cmd, file, lba, nolb); | ||
595 | if (ret) | ||
596 | return ret; | ||
597 | |||
598 | target_complete_cmd(cmd, GOOD); | ||
599 | return 0; | ||
600 | } | ||
601 | |||
602 | static sense_reason_t | ||
603 | fd_execute_unmap(struct se_cmd *cmd) | ||
604 | { | ||
605 | struct file *file = FD_DEV(cmd->se_dev)->fd_file; | ||
606 | |||
607 | return sbc_execute_unmap(cmd, fd_do_unmap, file); | ||
608 | } | ||
609 | |||
610 | static sense_reason_t | ||
611 | fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | 522 | fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
612 | enum dma_data_direction data_direction) | 523 | enum dma_data_direction data_direction) |
613 | { | 524 | { |
614 | struct se_device *dev = cmd->se_dev; | 525 | struct se_device *dev = cmd->se_dev; |
615 | struct fd_prot fd_prot; | 526 | struct fd_dev *fd_dev = FD_DEV(dev); |
527 | struct file *file = fd_dev->fd_file; | ||
528 | struct file *pfile = fd_dev->fd_prot_file; | ||
616 | sense_reason_t rc; | 529 | sense_reason_t rc; |
617 | int ret = 0; | 530 | int ret = 0; |
618 | /* | 531 | /* |
@@ -630,58 +543,45 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
630 | * physical memory addresses to struct iovec virtual memory. | 543 | * physical memory addresses to struct iovec virtual memory. |
631 | */ | 544 | */ |
632 | if (data_direction == DMA_FROM_DEVICE) { | 545 | if (data_direction == DMA_FROM_DEVICE) { |
633 | memset(&fd_prot, 0, sizeof(struct fd_prot)); | ||
634 | |||
635 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { | 546 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
636 | ret = fd_do_prot_rw(cmd, &fd_prot, false); | 547 | ret = fd_do_rw(cmd, pfile, dev->prot_length, |
548 | cmd->t_prot_sg, cmd->t_prot_nents, | ||
549 | cmd->prot_length, 0); | ||
637 | if (ret < 0) | 550 | if (ret < 0) |
638 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 551 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
639 | } | 552 | } |
640 | 553 | ||
641 | ret = fd_do_rw(cmd, sgl, sgl_nents, 0); | 554 | ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size, |
555 | sgl, sgl_nents, cmd->data_length, 0); | ||
642 | 556 | ||
643 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { | 557 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
644 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; | 558 | u32 sectors = cmd->data_length >> |
559 | ilog2(dev->dev_attrib.block_size); | ||
645 | 560 | ||
646 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, | 561 | rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, |
647 | 0, fd_prot.prot_sg, 0); | 562 | 0, cmd->t_prot_sg, 0); |
648 | if (rc) { | 563 | if (rc) |
649 | kfree(fd_prot.prot_sg); | ||
650 | kfree(fd_prot.prot_buf); | ||
651 | return rc; | 564 | return rc; |
652 | } | ||
653 | kfree(fd_prot.prot_sg); | ||
654 | kfree(fd_prot.prot_buf); | ||
655 | } | 565 | } |
656 | } else { | 566 | } else { |
657 | memset(&fd_prot, 0, sizeof(struct fd_prot)); | ||
658 | |||
659 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { | 567 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
660 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; | 568 | u32 sectors = cmd->data_length >> |
569 | ilog2(dev->dev_attrib.block_size); | ||
661 | 570 | ||
662 | ret = fd_do_prot_rw(cmd, &fd_prot, false); | 571 | rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, |
663 | if (ret < 0) | 572 | 0, cmd->t_prot_sg, 0); |
664 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 573 | if (rc) |
665 | |||
666 | rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, | ||
667 | 0, fd_prot.prot_sg, 0); | ||
668 | if (rc) { | ||
669 | kfree(fd_prot.prot_sg); | ||
670 | kfree(fd_prot.prot_buf); | ||
671 | return rc; | 574 | return rc; |
672 | } | ||
673 | } | 575 | } |
674 | 576 | ||
675 | ret = fd_do_rw(cmd, sgl, sgl_nents, 1); | 577 | ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size, |
578 | sgl, sgl_nents, cmd->data_length, 1); | ||
676 | /* | 579 | /* |
677 | * Perform implicit vfs_fsync_range() for fd_do_writev() ops | 580 | * Perform implicit vfs_fsync_range() for fd_do_writev() ops |
678 | * for SCSI WRITEs with Forced Unit Access (FUA) set. | 581 | * for SCSI WRITEs with Forced Unit Access (FUA) set. |
679 | * Allow this to happen independent of WCE=0 setting. | 582 | * Allow this to happen independent of WCE=0 setting. |
680 | */ | 583 | */ |
681 | if (ret > 0 && | 584 | if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) { |
682 | dev->dev_attrib.emulate_fua_write > 0 && | ||
683 | (cmd->se_cmd_flags & SCF_FUA)) { | ||
684 | struct fd_dev *fd_dev = FD_DEV(dev); | ||
685 | loff_t start = cmd->t_task_lba * | 585 | loff_t start = cmd->t_task_lba * |
686 | dev->dev_attrib.block_size; | 586 | dev->dev_attrib.block_size; |
687 | loff_t end; | 587 | loff_t end; |
@@ -695,17 +595,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
695 | } | 595 | } |
696 | 596 | ||
697 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { | 597 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
698 | ret = fd_do_prot_rw(cmd, &fd_prot, true); | 598 | ret = fd_do_rw(cmd, pfile, dev->prot_length, |
599 | cmd->t_prot_sg, cmd->t_prot_nents, | ||
600 | cmd->prot_length, 1); | ||
699 | if (ret < 0) | 601 | if (ret < 0) |
700 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 602 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
701 | } | 603 | } |
702 | } | 604 | } |
703 | 605 | ||
704 | if (ret < 0) { | 606 | if (ret < 0) |
705 | kfree(fd_prot.prot_sg); | ||
706 | kfree(fd_prot.prot_buf); | ||
707 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 607 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
708 | } | ||
709 | 608 | ||
710 | if (ret) | 609 | if (ret) |
711 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 610 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
@@ -908,7 +807,6 @@ static struct sbc_ops fd_sbc_ops = { | |||
908 | .execute_rw = fd_execute_rw, | 807 | .execute_rw = fd_execute_rw, |
909 | .execute_sync_cache = fd_execute_sync_cache, | 808 | .execute_sync_cache = fd_execute_sync_cache, |
910 | .execute_write_same = fd_execute_write_same, | 809 | .execute_write_same = fd_execute_write_same, |
911 | .execute_write_same_unmap = fd_execute_write_same_unmap, | ||
912 | .execute_unmap = fd_execute_unmap, | 810 | .execute_unmap = fd_execute_unmap, |
913 | }; | 811 | }; |
914 | 812 | ||
@@ -918,42 +816,7 @@ fd_parse_cdb(struct se_cmd *cmd) | |||
918 | return sbc_parse_cdb(cmd, &fd_sbc_ops); | 816 | return sbc_parse_cdb(cmd, &fd_sbc_ops); |
919 | } | 817 | } |
920 | 818 | ||
921 | DEF_TB_DEFAULT_ATTRIBS(fileio); | 819 | static const struct target_backend_ops fileio_ops = { |
922 | |||
923 | static struct configfs_attribute *fileio_backend_dev_attrs[] = { | ||
924 | &fileio_dev_attrib_emulate_model_alias.attr, | ||
925 | &fileio_dev_attrib_emulate_dpo.attr, | ||
926 | &fileio_dev_attrib_emulate_fua_write.attr, | ||
927 | &fileio_dev_attrib_emulate_fua_read.attr, | ||
928 | &fileio_dev_attrib_emulate_write_cache.attr, | ||
929 | &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
930 | &fileio_dev_attrib_emulate_tas.attr, | ||
931 | &fileio_dev_attrib_emulate_tpu.attr, | ||
932 | &fileio_dev_attrib_emulate_tpws.attr, | ||
933 | &fileio_dev_attrib_emulate_caw.attr, | ||
934 | &fileio_dev_attrib_emulate_3pc.attr, | ||
935 | &fileio_dev_attrib_pi_prot_type.attr, | ||
936 | &fileio_dev_attrib_hw_pi_prot_type.attr, | ||
937 | &fileio_dev_attrib_pi_prot_format.attr, | ||
938 | &fileio_dev_attrib_enforce_pr_isids.attr, | ||
939 | &fileio_dev_attrib_is_nonrot.attr, | ||
940 | &fileio_dev_attrib_emulate_rest_reord.attr, | ||
941 | &fileio_dev_attrib_force_pr_aptpl.attr, | ||
942 | &fileio_dev_attrib_hw_block_size.attr, | ||
943 | &fileio_dev_attrib_block_size.attr, | ||
944 | &fileio_dev_attrib_hw_max_sectors.attr, | ||
945 | &fileio_dev_attrib_optimal_sectors.attr, | ||
946 | &fileio_dev_attrib_hw_queue_depth.attr, | ||
947 | &fileio_dev_attrib_queue_depth.attr, | ||
948 | &fileio_dev_attrib_max_unmap_lba_count.attr, | ||
949 | &fileio_dev_attrib_max_unmap_block_desc_count.attr, | ||
950 | &fileio_dev_attrib_unmap_granularity.attr, | ||
951 | &fileio_dev_attrib_unmap_granularity_alignment.attr, | ||
952 | &fileio_dev_attrib_max_write_same_len.attr, | ||
953 | NULL, | ||
954 | }; | ||
955 | |||
956 | static struct se_subsystem_api fileio_template = { | ||
957 | .name = "fileio", | 820 | .name = "fileio", |
958 | .inquiry_prod = "FILEIO", | 821 | .inquiry_prod = "FILEIO", |
959 | .inquiry_rev = FD_VERSION, | 822 | .inquiry_rev = FD_VERSION, |
@@ -971,21 +834,17 @@ static struct se_subsystem_api fileio_template = { | |||
971 | .init_prot = fd_init_prot, | 834 | .init_prot = fd_init_prot, |
972 | .format_prot = fd_format_prot, | 835 | .format_prot = fd_format_prot, |
973 | .free_prot = fd_free_prot, | 836 | .free_prot = fd_free_prot, |
837 | .tb_dev_attrib_attrs = sbc_attrib_attrs, | ||
974 | }; | 838 | }; |
975 | 839 | ||
976 | static int __init fileio_module_init(void) | 840 | static int __init fileio_module_init(void) |
977 | { | 841 | { |
978 | struct target_backend_cits *tbc = &fileio_template.tb_cits; | 842 | return transport_backend_register(&fileio_ops); |
979 | |||
980 | target_core_setup_sub_cits(&fileio_template); | ||
981 | tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs; | ||
982 | |||
983 | return transport_subsystem_register(&fileio_template); | ||
984 | } | 843 | } |
985 | 844 | ||
986 | static void __exit fileio_module_exit(void) | 845 | static void __exit fileio_module_exit(void) |
987 | { | 846 | { |
988 | transport_subsystem_release(&fileio_template); | 847 | target_backend_unregister(&fileio_ops); |
989 | } | 848 | } |
990 | 849 | ||
991 | MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); | 850 | MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); |
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 182cbb295039..068966fce308 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -21,12 +21,6 @@ | |||
21 | #define FDBD_HAS_BUFFERED_IO_WCE 0x04 | 21 | #define FDBD_HAS_BUFFERED_IO_WCE 0x04 |
22 | #define FDBD_FORMAT_UNIT_SIZE 2048 | 22 | #define FDBD_FORMAT_UNIT_SIZE 2048 |
23 | 23 | ||
24 | struct fd_prot { | ||
25 | unsigned char *prot_buf; | ||
26 | struct scatterlist *prot_sg; | ||
27 | u32 prot_sg_nents; | ||
28 | }; | ||
29 | |||
30 | struct fd_dev { | 24 | struct fd_dev { |
31 | struct se_device dev; | 25 | struct se_device dev; |
32 | 26 | ||
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index ff95f95dcd13..62ea4e8e70a8 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -36,67 +36,78 @@ | |||
36 | #include <target/target_core_base.h> | 36 | #include <target/target_core_base.h> |
37 | #include <target/target_core_backend.h> | 37 | #include <target/target_core_backend.h> |
38 | #include <target/target_core_fabric.h> | 38 | #include <target/target_core_fabric.h> |
39 | #include <target/target_core_configfs.h> | ||
40 | 39 | ||
41 | #include "target_core_internal.h" | 40 | #include "target_core_internal.h" |
42 | 41 | ||
43 | static LIST_HEAD(subsystem_list); | 42 | static LIST_HEAD(backend_list); |
44 | static DEFINE_MUTEX(subsystem_mutex); | 43 | static DEFINE_MUTEX(backend_mutex); |
45 | 44 | ||
46 | static u32 hba_id_counter; | 45 | static u32 hba_id_counter; |
47 | 46 | ||
48 | static DEFINE_SPINLOCK(hba_lock); | 47 | static DEFINE_SPINLOCK(hba_lock); |
49 | static LIST_HEAD(hba_list); | 48 | static LIST_HEAD(hba_list); |
50 | 49 | ||
51 | int transport_subsystem_register(struct se_subsystem_api *sub_api) | ||
52 | { | ||
53 | struct se_subsystem_api *s; | ||
54 | |||
55 | INIT_LIST_HEAD(&sub_api->sub_api_list); | ||
56 | 50 | ||
57 | mutex_lock(&subsystem_mutex); | 51 | int transport_backend_register(const struct target_backend_ops *ops) |
58 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | 52 | { |
59 | if (!strcmp(s->name, sub_api->name)) { | 53 | struct target_backend *tb, *old; |
60 | pr_err("%p is already registered with" | 54 | |
61 | " duplicate name %s, unable to process" | 55 | tb = kzalloc(sizeof(*tb), GFP_KERNEL); |
62 | " request\n", s, s->name); | 56 | if (!tb) |
63 | mutex_unlock(&subsystem_mutex); | 57 | return -ENOMEM; |
58 | tb->ops = ops; | ||
59 | |||
60 | mutex_lock(&backend_mutex); | ||
61 | list_for_each_entry(old, &backend_list, list) { | ||
62 | if (!strcmp(old->ops->name, ops->name)) { | ||
63 | pr_err("backend %s already registered.\n", ops->name); | ||
64 | mutex_unlock(&backend_mutex); | ||
65 | kfree(tb); | ||
64 | return -EEXIST; | 66 | return -EEXIST; |
65 | } | 67 | } |
66 | } | 68 | } |
67 | list_add_tail(&sub_api->sub_api_list, &subsystem_list); | 69 | target_setup_backend_cits(tb); |
68 | mutex_unlock(&subsystem_mutex); | 70 | list_add_tail(&tb->list, &backend_list); |
71 | mutex_unlock(&backend_mutex); | ||
69 | 72 | ||
70 | pr_debug("TCM: Registered subsystem plugin: %s struct module:" | 73 | pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n", |
71 | " %p\n", sub_api->name, sub_api->owner); | 74 | ops->name, ops->owner); |
72 | return 0; | 75 | return 0; |
73 | } | 76 | } |
74 | EXPORT_SYMBOL(transport_subsystem_register); | 77 | EXPORT_SYMBOL(transport_backend_register); |
75 | 78 | ||
76 | void transport_subsystem_release(struct se_subsystem_api *sub_api) | 79 | void target_backend_unregister(const struct target_backend_ops *ops) |
77 | { | 80 | { |
78 | mutex_lock(&subsystem_mutex); | 81 | struct target_backend *tb; |
79 | list_del(&sub_api->sub_api_list); | 82 | |
80 | mutex_unlock(&subsystem_mutex); | 83 | mutex_lock(&backend_mutex); |
84 | list_for_each_entry(tb, &backend_list, list) { | ||
85 | if (tb->ops == ops) { | ||
86 | list_del(&tb->list); | ||
87 | kfree(tb); | ||
88 | break; | ||
89 | } | ||
90 | } | ||
91 | mutex_unlock(&backend_mutex); | ||
81 | } | 92 | } |
82 | EXPORT_SYMBOL(transport_subsystem_release); | 93 | EXPORT_SYMBOL(target_backend_unregister); |
83 | 94 | ||
84 | static struct se_subsystem_api *core_get_backend(const char *sub_name) | 95 | static struct target_backend *core_get_backend(const char *name) |
85 | { | 96 | { |
86 | struct se_subsystem_api *s; | 97 | struct target_backend *tb; |
87 | 98 | ||
88 | mutex_lock(&subsystem_mutex); | 99 | mutex_lock(&backend_mutex); |
89 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | 100 | list_for_each_entry(tb, &backend_list, list) { |
90 | if (!strcmp(s->name, sub_name)) | 101 | if (!strcmp(tb->ops->name, name)) |
91 | goto found; | 102 | goto found; |
92 | } | 103 | } |
93 | mutex_unlock(&subsystem_mutex); | 104 | mutex_unlock(&backend_mutex); |
94 | return NULL; | 105 | return NULL; |
95 | found: | 106 | found: |
96 | if (s->owner && !try_module_get(s->owner)) | 107 | if (tb->ops->owner && !try_module_get(tb->ops->owner)) |
97 | s = NULL; | 108 | tb = NULL; |
98 | mutex_unlock(&subsystem_mutex); | 109 | mutex_unlock(&backend_mutex); |
99 | return s; | 110 | return tb; |
100 | } | 111 | } |
101 | 112 | ||
102 | struct se_hba * | 113 | struct se_hba * |
@@ -117,13 +128,13 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
117 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); | 128 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); |
118 | hba->hba_flags |= hba_flags; | 129 | hba->hba_flags |= hba_flags; |
119 | 130 | ||
120 | hba->transport = core_get_backend(plugin_name); | 131 | hba->backend = core_get_backend(plugin_name); |
121 | if (!hba->transport) { | 132 | if (!hba->backend) { |
122 | ret = -EINVAL; | 133 | ret = -EINVAL; |
123 | goto out_free_hba; | 134 | goto out_free_hba; |
124 | } | 135 | } |
125 | 136 | ||
126 | ret = hba->transport->attach_hba(hba, plugin_dep_id); | 137 | ret = hba->backend->ops->attach_hba(hba, plugin_dep_id); |
127 | if (ret < 0) | 138 | if (ret < 0) |
128 | goto out_module_put; | 139 | goto out_module_put; |
129 | 140 | ||
@@ -138,8 +149,8 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
138 | return hba; | 149 | return hba; |
139 | 150 | ||
140 | out_module_put: | 151 | out_module_put: |
141 | module_put(hba->transport->owner); | 152 | module_put(hba->backend->ops->owner); |
142 | hba->transport = NULL; | 153 | hba->backend = NULL; |
143 | out_free_hba: | 154 | out_free_hba: |
144 | kfree(hba); | 155 | kfree(hba); |
145 | return ERR_PTR(ret); | 156 | return ERR_PTR(ret); |
@@ -150,7 +161,7 @@ core_delete_hba(struct se_hba *hba) | |||
150 | { | 161 | { |
151 | WARN_ON(hba->dev_count); | 162 | WARN_ON(hba->dev_count); |
152 | 163 | ||
153 | hba->transport->detach_hba(hba); | 164 | hba->backend->ops->detach_hba(hba); |
154 | 165 | ||
155 | spin_lock(&hba_lock); | 166 | spin_lock(&hba_lock); |
156 | list_del(&hba->hba_node); | 167 | list_del(&hba->hba_node); |
@@ -159,9 +170,9 @@ core_delete_hba(struct se_hba *hba) | |||
159 | pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" | 170 | pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" |
160 | " Core\n", hba->hba_id); | 171 | " Core\n", hba->hba_id); |
161 | 172 | ||
162 | module_put(hba->transport->owner); | 173 | module_put(hba->backend->ops->owner); |
163 | 174 | ||
164 | hba->transport = NULL; | 175 | hba->backend = NULL; |
165 | kfree(hba); | 176 | kfree(hba); |
166 | return 0; | 177 | return 0; |
167 | } | 178 | } |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 972ed1781ae2..6d88d24e6cce 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -40,7 +40,6 @@ | |||
40 | 40 | ||
41 | #include <target/target_core_base.h> | 41 | #include <target/target_core_base.h> |
42 | #include <target/target_core_backend.h> | 42 | #include <target/target_core_backend.h> |
43 | #include <target/target_core_backend_configfs.h> | ||
44 | 43 | ||
45 | #include "target_core_iblock.h" | 44 | #include "target_core_iblock.h" |
46 | 45 | ||
@@ -53,17 +52,11 @@ static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev) | |||
53 | } | 52 | } |
54 | 53 | ||
55 | 54 | ||
56 | static struct se_subsystem_api iblock_template; | ||
57 | |||
58 | /* iblock_attach_hba(): (Part of se_subsystem_api_t template) | ||
59 | * | ||
60 | * | ||
61 | */ | ||
62 | static int iblock_attach_hba(struct se_hba *hba, u32 host_id) | 55 | static int iblock_attach_hba(struct se_hba *hba, u32 host_id) |
63 | { | 56 | { |
64 | pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" | 57 | pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" |
65 | " Generic Target Core Stack %s\n", hba->hba_id, | 58 | " Generic Target Core Stack %s\n", hba->hba_id, |
66 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); | 59 | IBLOCK_VERSION, TARGET_CORE_VERSION); |
67 | return 0; | 60 | return 0; |
68 | } | 61 | } |
69 | 62 | ||
@@ -197,6 +190,14 @@ out: | |||
197 | return ret; | 190 | return ret; |
198 | } | 191 | } |
199 | 192 | ||
193 | static void iblock_dev_call_rcu(struct rcu_head *p) | ||
194 | { | ||
195 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
196 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); | ||
197 | |||
198 | kfree(ib_dev); | ||
199 | } | ||
200 | |||
200 | static void iblock_free_device(struct se_device *dev) | 201 | static void iblock_free_device(struct se_device *dev) |
201 | { | 202 | { |
202 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); | 203 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
@@ -206,7 +207,7 @@ static void iblock_free_device(struct se_device *dev) | |||
206 | if (ib_dev->ibd_bio_set != NULL) | 207 | if (ib_dev->ibd_bio_set != NULL) |
207 | bioset_free(ib_dev->ibd_bio_set); | 208 | bioset_free(ib_dev->ibd_bio_set); |
208 | 209 | ||
209 | kfree(ib_dev); | 210 | call_rcu(&dev->rcu_head, iblock_dev_call_rcu); |
210 | } | 211 | } |
211 | 212 | ||
212 | static unsigned long long iblock_emulate_read_cap_with_block_size( | 213 | static unsigned long long iblock_emulate_read_cap_with_block_size( |
@@ -414,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd) | |||
414 | } | 415 | } |
415 | 416 | ||
416 | static sense_reason_t | 417 | static sense_reason_t |
417 | iblock_do_unmap(struct se_cmd *cmd, void *priv, | 418 | iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) |
418 | sector_t lba, sector_t nolb) | ||
419 | { | 419 | { |
420 | struct block_device *bdev = priv; | 420 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; |
421 | int ret; | 421 | int ret; |
422 | 422 | ||
423 | ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); | 423 | ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); |
@@ -430,30 +430,6 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv, | |||
430 | } | 430 | } |
431 | 431 | ||
432 | static sense_reason_t | 432 | static sense_reason_t |
433 | iblock_execute_unmap(struct se_cmd *cmd) | ||
434 | { | ||
435 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; | ||
436 | |||
437 | return sbc_execute_unmap(cmd, iblock_do_unmap, bdev); | ||
438 | } | ||
439 | |||
440 | static sense_reason_t | ||
441 | iblock_execute_write_same_unmap(struct se_cmd *cmd) | ||
442 | { | ||
443 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; | ||
444 | sector_t lba = cmd->t_task_lba; | ||
445 | sector_t nolb = sbc_get_write_same_sectors(cmd); | ||
446 | sense_reason_t ret; | ||
447 | |||
448 | ret = iblock_do_unmap(cmd, bdev, lba, nolb); | ||
449 | if (ret) | ||
450 | return ret; | ||
451 | |||
452 | target_complete_cmd(cmd, GOOD); | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static sense_reason_t | ||
457 | iblock_execute_write_same(struct se_cmd *cmd) | 433 | iblock_execute_write_same(struct se_cmd *cmd) |
458 | { | 434 | { |
459 | struct iblock_req *ibr; | 435 | struct iblock_req *ibr; |
@@ -844,7 +820,6 @@ static struct sbc_ops iblock_sbc_ops = { | |||
844 | .execute_rw = iblock_execute_rw, | 820 | .execute_rw = iblock_execute_rw, |
845 | .execute_sync_cache = iblock_execute_sync_cache, | 821 | .execute_sync_cache = iblock_execute_sync_cache, |
846 | .execute_write_same = iblock_execute_write_same, | 822 | .execute_write_same = iblock_execute_write_same, |
847 | .execute_write_same_unmap = iblock_execute_write_same_unmap, | ||
848 | .execute_unmap = iblock_execute_unmap, | 823 | .execute_unmap = iblock_execute_unmap, |
849 | }; | 824 | }; |
850 | 825 | ||
@@ -863,42 +838,7 @@ static bool iblock_get_write_cache(struct se_device *dev) | |||
863 | return q->flush_flags & REQ_FLUSH; | 838 | return q->flush_flags & REQ_FLUSH; |
864 | } | 839 | } |
865 | 840 | ||
866 | DEF_TB_DEFAULT_ATTRIBS(iblock); | 841 | static const struct target_backend_ops iblock_ops = { |
867 | |||
868 | static struct configfs_attribute *iblock_backend_dev_attrs[] = { | ||
869 | &iblock_dev_attrib_emulate_model_alias.attr, | ||
870 | &iblock_dev_attrib_emulate_dpo.attr, | ||
871 | &iblock_dev_attrib_emulate_fua_write.attr, | ||
872 | &iblock_dev_attrib_emulate_fua_read.attr, | ||
873 | &iblock_dev_attrib_emulate_write_cache.attr, | ||
874 | &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
875 | &iblock_dev_attrib_emulate_tas.attr, | ||
876 | &iblock_dev_attrib_emulate_tpu.attr, | ||
877 | &iblock_dev_attrib_emulate_tpws.attr, | ||
878 | &iblock_dev_attrib_emulate_caw.attr, | ||
879 | &iblock_dev_attrib_emulate_3pc.attr, | ||
880 | &iblock_dev_attrib_pi_prot_type.attr, | ||
881 | &iblock_dev_attrib_hw_pi_prot_type.attr, | ||
882 | &iblock_dev_attrib_pi_prot_format.attr, | ||
883 | &iblock_dev_attrib_enforce_pr_isids.attr, | ||
884 | &iblock_dev_attrib_is_nonrot.attr, | ||
885 | &iblock_dev_attrib_emulate_rest_reord.attr, | ||
886 | &iblock_dev_attrib_force_pr_aptpl.attr, | ||
887 | &iblock_dev_attrib_hw_block_size.attr, | ||
888 | &iblock_dev_attrib_block_size.attr, | ||
889 | &iblock_dev_attrib_hw_max_sectors.attr, | ||
890 | &iblock_dev_attrib_optimal_sectors.attr, | ||
891 | &iblock_dev_attrib_hw_queue_depth.attr, | ||
892 | &iblock_dev_attrib_queue_depth.attr, | ||
893 | &iblock_dev_attrib_max_unmap_lba_count.attr, | ||
894 | &iblock_dev_attrib_max_unmap_block_desc_count.attr, | ||
895 | &iblock_dev_attrib_unmap_granularity.attr, | ||
896 | &iblock_dev_attrib_unmap_granularity_alignment.attr, | ||
897 | &iblock_dev_attrib_max_write_same_len.attr, | ||
898 | NULL, | ||
899 | }; | ||
900 | |||
901 | static struct se_subsystem_api iblock_template = { | ||
902 | .name = "iblock", | 842 | .name = "iblock", |
903 | .inquiry_prod = "IBLOCK", | 843 | .inquiry_prod = "IBLOCK", |
904 | .inquiry_rev = IBLOCK_VERSION, | 844 | .inquiry_rev = IBLOCK_VERSION, |
@@ -918,21 +858,17 @@ static struct se_subsystem_api iblock_template = { | |||
918 | .get_io_min = iblock_get_io_min, | 858 | .get_io_min = iblock_get_io_min, |
919 | .get_io_opt = iblock_get_io_opt, | 859 | .get_io_opt = iblock_get_io_opt, |
920 | .get_write_cache = iblock_get_write_cache, | 860 | .get_write_cache = iblock_get_write_cache, |
861 | .tb_dev_attrib_attrs = sbc_attrib_attrs, | ||
921 | }; | 862 | }; |
922 | 863 | ||
923 | static int __init iblock_module_init(void) | 864 | static int __init iblock_module_init(void) |
924 | { | 865 | { |
925 | struct target_backend_cits *tbc = &iblock_template.tb_cits; | 866 | return transport_backend_register(&iblock_ops); |
926 | |||
927 | target_core_setup_sub_cits(&iblock_template); | ||
928 | tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs; | ||
929 | |||
930 | return transport_subsystem_register(&iblock_template); | ||
931 | } | 867 | } |
932 | 868 | ||
933 | static void __exit iblock_module_exit(void) | 869 | static void __exit iblock_module_exit(void) |
934 | { | 870 | { |
935 | transport_subsystem_release(&iblock_template); | 871 | target_backend_unregister(&iblock_ops); |
936 | } | 872 | } |
937 | 873 | ||
938 | MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); | 874 | MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 68bd7f5d9f73..99c24acfe676 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -1,6 +1,53 @@ | |||
1 | #ifndef TARGET_CORE_INTERNAL_H | 1 | #ifndef TARGET_CORE_INTERNAL_H |
2 | #define TARGET_CORE_INTERNAL_H | 2 | #define TARGET_CORE_INTERNAL_H |
3 | 3 | ||
4 | #define TARGET_CORE_NAME_MAX_LEN 64 | ||
5 | #define TARGET_FABRIC_NAME_SIZE 32 | ||
6 | |||
7 | struct target_backend { | ||
8 | struct list_head list; | ||
9 | |||
10 | const struct target_backend_ops *ops; | ||
11 | |||
12 | struct config_item_type tb_dev_cit; | ||
13 | struct config_item_type tb_dev_attrib_cit; | ||
14 | struct config_item_type tb_dev_pr_cit; | ||
15 | struct config_item_type tb_dev_wwn_cit; | ||
16 | struct config_item_type tb_dev_alua_tg_pt_gps_cit; | ||
17 | struct config_item_type tb_dev_stat_cit; | ||
18 | }; | ||
19 | |||
20 | struct target_fabric_configfs { | ||
21 | atomic_t tf_access_cnt; | ||
22 | struct list_head tf_list; | ||
23 | struct config_group tf_group; | ||
24 | struct config_group tf_disc_group; | ||
25 | struct config_group *tf_default_groups[2]; | ||
26 | const struct target_core_fabric_ops *tf_ops; | ||
27 | |||
28 | struct config_item_type tf_discovery_cit; | ||
29 | struct config_item_type tf_wwn_cit; | ||
30 | struct config_item_type tf_wwn_fabric_stats_cit; | ||
31 | struct config_item_type tf_tpg_cit; | ||
32 | struct config_item_type tf_tpg_base_cit; | ||
33 | struct config_item_type tf_tpg_lun_cit; | ||
34 | struct config_item_type tf_tpg_port_cit; | ||
35 | struct config_item_type tf_tpg_port_stat_cit; | ||
36 | struct config_item_type tf_tpg_np_cit; | ||
37 | struct config_item_type tf_tpg_np_base_cit; | ||
38 | struct config_item_type tf_tpg_attrib_cit; | ||
39 | struct config_item_type tf_tpg_auth_cit; | ||
40 | struct config_item_type tf_tpg_param_cit; | ||
41 | struct config_item_type tf_tpg_nacl_cit; | ||
42 | struct config_item_type tf_tpg_nacl_base_cit; | ||
43 | struct config_item_type tf_tpg_nacl_attrib_cit; | ||
44 | struct config_item_type tf_tpg_nacl_auth_cit; | ||
45 | struct config_item_type tf_tpg_nacl_param_cit; | ||
46 | struct config_item_type tf_tpg_nacl_stat_cit; | ||
47 | struct config_item_type tf_tpg_mappedlun_cit; | ||
48 | struct config_item_type tf_tpg_mappedlun_stat_cit; | ||
49 | }; | ||
50 | |||
4 | /* target_core_alua.c */ | 51 | /* target_core_alua.c */ |
5 | extern struct t10_alua_lu_gp *default_lu_gp; | 52 | extern struct t10_alua_lu_gp *default_lu_gp; |
6 | 53 | ||
@@ -8,28 +55,27 @@ extern struct t10_alua_lu_gp *default_lu_gp; | |||
8 | extern struct mutex g_device_mutex; | 55 | extern struct mutex g_device_mutex; |
9 | extern struct list_head g_device_list; | 56 | extern struct list_head g_device_list; |
10 | 57 | ||
58 | int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev); | ||
11 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); | 59 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); |
12 | int core_free_device_list_for_node(struct se_node_acl *, | 60 | void target_pr_kref_release(struct kref *); |
61 | void core_free_device_list_for_node(struct se_node_acl *, | ||
13 | struct se_portal_group *); | 62 | struct se_portal_group *); |
14 | void core_update_device_list_access(u32, u32, struct se_node_acl *); | 63 | void core_update_device_list_access(u64, u32, struct se_node_acl *); |
64 | struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64); | ||
15 | int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, | 65 | int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, |
16 | u32, u32, struct se_node_acl *, struct se_portal_group *); | 66 | u64, u32, struct se_node_acl *, struct se_portal_group *); |
17 | int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *, | 67 | void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *, |
18 | u32, u32, struct se_node_acl *, struct se_portal_group *); | 68 | struct se_node_acl *, struct se_portal_group *); |
19 | void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); | 69 | void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); |
20 | int core_dev_export(struct se_device *, struct se_portal_group *, | 70 | int core_dev_add_lun(struct se_portal_group *, struct se_device *, |
21 | struct se_lun *); | 71 | struct se_lun *lun); |
22 | void core_dev_unexport(struct se_device *, struct se_portal_group *, | ||
23 | struct se_lun *); | ||
24 | struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); | ||
25 | void core_dev_del_lun(struct se_portal_group *, struct se_lun *); | 72 | void core_dev_del_lun(struct se_portal_group *, struct se_lun *); |
26 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); | ||
27 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, | 73 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, |
28 | struct se_node_acl *, u32, int *); | 74 | struct se_node_acl *, u64, int *); |
29 | int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, | 75 | int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, |
30 | struct se_lun_acl *, u32, u32); | 76 | struct se_lun_acl *, struct se_lun *lun, u32); |
31 | int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, | 77 | int core_dev_del_initiator_node_lun_acl(struct se_lun *, |
32 | struct se_lun *, struct se_lun_acl *); | 78 | struct se_lun_acl *); |
33 | void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, | 79 | void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, |
34 | struct se_lun_acl *lacl); | 80 | struct se_lun_acl *lacl); |
35 | int core_dev_setup_virtual_lun0(void); | 81 | int core_dev_setup_virtual_lun0(void); |
@@ -38,6 +84,18 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name); | |||
38 | int target_configure_device(struct se_device *dev); | 84 | int target_configure_device(struct se_device *dev); |
39 | void target_free_device(struct se_device *); | 85 | void target_free_device(struct se_device *); |
40 | 86 | ||
87 | /* target_core_configfs.c */ | ||
88 | void target_setup_backend_cits(struct target_backend *); | ||
89 | |||
90 | /* target_core_fabric_lib.c */ | ||
91 | int target_get_pr_transport_id_len(struct se_node_acl *nacl, | ||
92 | struct t10_pr_registration *pr_reg, int *format_code); | ||
93 | int target_get_pr_transport_id(struct se_node_acl *nacl, | ||
94 | struct t10_pr_registration *pr_reg, int *format_code, | ||
95 | unsigned char *buf); | ||
96 | const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, | ||
97 | const char *buf, u32 *out_tid_len, char **port_nexus_ptr); | ||
98 | |||
41 | /* target_core_hba.c */ | 99 | /* target_core_hba.c */ |
42 | struct se_hba *core_alloc_hba(const char *, u32, u32); | 100 | struct se_hba *core_alloc_hba(const char *, u32, u32); |
43 | int core_delete_hba(struct se_hba *); | 101 | int core_delete_hba(struct se_hba *); |
@@ -53,12 +111,16 @@ extern struct se_device *g_lun0_dev; | |||
53 | 111 | ||
54 | struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, | 112 | struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, |
55 | const char *); | 113 | const char *); |
56 | void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); | 114 | void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *, |
115 | struct se_lun *); | ||
57 | void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); | 116 | void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); |
58 | struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); | 117 | struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64); |
59 | int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, | 118 | int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, |
60 | u32, struct se_device *); | 119 | u32, struct se_device *); |
61 | void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); | 120 | void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); |
121 | struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg, | ||
122 | const char *initiatorname); | ||
123 | void core_tpg_del_initiator_node_acl(struct se_node_acl *acl); | ||
62 | 124 | ||
63 | /* target_core_transport.c */ | 125 | /* target_core_transport.c */ |
64 | extern struct kmem_cache *se_tmr_req_cache; | 126 | extern struct kmem_cache *se_tmr_req_cache; |
@@ -77,14 +139,19 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); | |||
77 | int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); | 139 | int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); |
78 | int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); | 140 | int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); |
79 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); | 141 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); |
80 | int transport_clear_lun_ref(struct se_lun *); | 142 | void transport_clear_lun_ref(struct se_lun *); |
81 | void transport_send_task_abort(struct se_cmd *); | 143 | void transport_send_task_abort(struct se_cmd *); |
82 | sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); | 144 | sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); |
83 | void target_qf_do_work(struct work_struct *work); | 145 | void target_qf_do_work(struct work_struct *work); |
146 | bool target_check_wce(struct se_device *dev); | ||
147 | bool target_check_fua(struct se_device *dev); | ||
84 | 148 | ||
85 | /* target_core_stat.c */ | 149 | /* target_core_stat.c */ |
86 | void target_stat_setup_dev_default_groups(struct se_device *); | 150 | void target_stat_setup_dev_default_groups(struct se_device *); |
87 | void target_stat_setup_port_default_groups(struct se_lun *); | 151 | void target_stat_setup_port_default_groups(struct se_lun *); |
88 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); | 152 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); |
89 | 153 | ||
154 | /* target_core_xcopy.c */ | ||
155 | extern struct se_portal_group xcopy_pt_tpg; | ||
156 | |||
90 | #endif /* TARGET_CORE_INTERNAL_H */ | 157 | #endif /* TARGET_CORE_INTERNAL_H */ |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 8e5fa291f878..0fdbe43b7dad 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <target/target_core_base.h> | 35 | #include <target/target_core_base.h> |
36 | #include <target/target_core_backend.h> | 36 | #include <target/target_core_backend.h> |
37 | #include <target/target_core_fabric.h> | 37 | #include <target/target_core_fabric.h> |
38 | #include <target/target_core_configfs.h> | ||
39 | 38 | ||
40 | #include "target_core_internal.h" | 39 | #include "target_core_internal.h" |
41 | #include "target_core_pr.h" | 40 | #include "target_core_pr.h" |
@@ -45,7 +44,6 @@ | |||
45 | * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT) | 44 | * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT) |
46 | */ | 45 | */ |
47 | struct pr_transport_id_holder { | 46 | struct pr_transport_id_holder { |
48 | int dest_local_nexus; | ||
49 | struct t10_pr_registration *dest_pr_reg; | 47 | struct t10_pr_registration *dest_pr_reg; |
50 | struct se_portal_group *dest_tpg; | 48 | struct se_portal_group *dest_tpg; |
51 | struct se_node_acl *dest_node_acl; | 49 | struct se_node_acl *dest_node_acl; |
@@ -231,9 +229,10 @@ target_scsi2_reservation_release(struct se_cmd *cmd) | |||
231 | dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID; | 229 | dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID; |
232 | } | 230 | } |
233 | tpg = sess->se_tpg; | 231 | tpg = sess->se_tpg; |
234 | pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" | 232 | pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->" |
235 | " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 233 | " MAPPED LUN: %llu for %s\n", |
236 | cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, | 234 | tpg->se_tpg_tfo->get_fabric_name(), |
235 | cmd->se_lun->unpacked_lun, cmd->orig_fe_lun, | ||
237 | sess->se_node_acl->initiatorname); | 236 | sess->se_node_acl->initiatorname); |
238 | 237 | ||
239 | out_unlock: | 238 | out_unlock: |
@@ -277,12 +276,12 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
277 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { | 276 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { |
278 | pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", | 277 | pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", |
279 | tpg->se_tpg_tfo->get_fabric_name()); | 278 | tpg->se_tpg_tfo->get_fabric_name()); |
280 | pr_err("Original reserver LUN: %u %s\n", | 279 | pr_err("Original reserver LUN: %llu %s\n", |
281 | cmd->se_lun->unpacked_lun, | 280 | cmd->se_lun->unpacked_lun, |
282 | dev->dev_reserved_node_acl->initiatorname); | 281 | dev->dev_reserved_node_acl->initiatorname); |
283 | pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" | 282 | pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu" |
284 | " from %s \n", cmd->se_lun->unpacked_lun, | 283 | " from %s \n", cmd->se_lun->unpacked_lun, |
285 | cmd->se_deve->mapped_lun, | 284 | cmd->orig_fe_lun, |
286 | sess->se_node_acl->initiatorname); | 285 | sess->se_node_acl->initiatorname); |
287 | ret = TCM_RESERVATION_CONFLICT; | 286 | ret = TCM_RESERVATION_CONFLICT; |
288 | goto out_unlock; | 287 | goto out_unlock; |
@@ -294,9 +293,9 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
294 | dev->dev_res_bin_isid = sess->sess_bin_isid; | 293 | dev->dev_res_bin_isid = sess->sess_bin_isid; |
295 | dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID; | 294 | dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID; |
296 | } | 295 | } |
297 | pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" | 296 | pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu" |
298 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 297 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
299 | cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, | 298 | cmd->se_lun->unpacked_lun, cmd->orig_fe_lun, |
300 | sess->se_node_acl->initiatorname); | 299 | sess->se_node_acl->initiatorname); |
301 | 300 | ||
302 | out_unlock: | 301 | out_unlock: |
@@ -314,28 +313,31 @@ out: | |||
314 | * This function is called by those initiator ports who are *NOT* | 313 | * This function is called by those initiator ports who are *NOT* |
315 | * the active PR reservation holder when a reservation is present. | 314 | * the active PR reservation holder when a reservation is present. |
316 | */ | 315 | */ |
317 | static int core_scsi3_pr_seq_non_holder( | 316 | static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, |
318 | struct se_cmd *cmd, | 317 | bool isid_mismatch) |
319 | u32 pr_reg_type) | ||
320 | { | 318 | { |
321 | unsigned char *cdb = cmd->t_task_cdb; | 319 | unsigned char *cdb = cmd->t_task_cdb; |
322 | struct se_dev_entry *se_deve; | ||
323 | struct se_session *se_sess = cmd->se_sess; | 320 | struct se_session *se_sess = cmd->se_sess; |
324 | int other_cdb = 0, ignore_reg; | 321 | struct se_node_acl *nacl = se_sess->se_node_acl; |
322 | int other_cdb = 0; | ||
325 | int registered_nexus = 0, ret = 1; /* Conflict by default */ | 323 | int registered_nexus = 0, ret = 1; /* Conflict by default */ |
326 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ | 324 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ |
327 | int we = 0; /* Write Exclusive */ | 325 | int we = 0; /* Write Exclusive */ |
328 | int legacy = 0; /* Act like a legacy device and return | 326 | int legacy = 0; /* Act like a legacy device and return |
329 | * RESERVATION CONFLICT on some CDBs */ | 327 | * RESERVATION CONFLICT on some CDBs */ |
330 | 328 | ||
331 | se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 329 | if (isid_mismatch) { |
332 | /* | 330 | registered_nexus = 0; |
333 | * Determine if the registration should be ignored due to | 331 | } else { |
334 | * non-matching ISIDs in target_scsi3_pr_reservation_check(). | 332 | struct se_dev_entry *se_deve; |
335 | */ | 333 | |
336 | ignore_reg = (pr_reg_type & 0x80000000); | 334 | rcu_read_lock(); |
337 | if (ignore_reg) | 335 | se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); |
338 | pr_reg_type &= ~0x80000000; | 336 | if (se_deve) |
337 | registered_nexus = test_bit(DEF_PR_REG_ACTIVE, | ||
338 | &se_deve->deve_flags); | ||
339 | rcu_read_unlock(); | ||
340 | } | ||
339 | 341 | ||
340 | switch (pr_reg_type) { | 342 | switch (pr_reg_type) { |
341 | case PR_TYPE_WRITE_EXCLUSIVE: | 343 | case PR_TYPE_WRITE_EXCLUSIVE: |
@@ -345,8 +347,6 @@ static int core_scsi3_pr_seq_non_holder( | |||
345 | * Some commands are only allowed for the persistent reservation | 347 | * Some commands are only allowed for the persistent reservation |
346 | * holder. | 348 | * holder. |
347 | */ | 349 | */ |
348 | if ((se_deve->def_pr_registered) && !(ignore_reg)) | ||
349 | registered_nexus = 1; | ||
350 | break; | 350 | break; |
351 | case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: | 351 | case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: |
352 | we = 1; | 352 | we = 1; |
@@ -355,8 +355,6 @@ static int core_scsi3_pr_seq_non_holder( | |||
355 | * Some commands are only allowed for registered I_T Nexuses. | 355 | * Some commands are only allowed for registered I_T Nexuses. |
356 | */ | 356 | */ |
357 | reg_only = 1; | 357 | reg_only = 1; |
358 | if ((se_deve->def_pr_registered) && !(ignore_reg)) | ||
359 | registered_nexus = 1; | ||
360 | break; | 358 | break; |
361 | case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: | 359 | case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: |
362 | we = 1; | 360 | we = 1; |
@@ -365,8 +363,6 @@ static int core_scsi3_pr_seq_non_holder( | |||
365 | * Each registered I_T Nexus is a reservation holder. | 363 | * Each registered I_T Nexus is a reservation holder. |
366 | */ | 364 | */ |
367 | all_reg = 1; | 365 | all_reg = 1; |
368 | if ((se_deve->def_pr_registered) && !(ignore_reg)) | ||
369 | registered_nexus = 1; | ||
370 | break; | 366 | break; |
371 | default: | 367 | default: |
372 | return -EINVAL; | 368 | return -EINVAL; |
@@ -572,6 +568,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd) | |||
572 | struct se_device *dev = cmd->se_dev; | 568 | struct se_device *dev = cmd->se_dev; |
573 | struct se_session *sess = cmd->se_sess; | 569 | struct se_session *sess = cmd->se_sess; |
574 | u32 pr_reg_type; | 570 | u32 pr_reg_type; |
571 | bool isid_mismatch = false; | ||
575 | 572 | ||
576 | if (!dev->dev_pr_res_holder) | 573 | if (!dev->dev_pr_res_holder) |
577 | return 0; | 574 | return 0; |
@@ -584,7 +581,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd) | |||
584 | if (dev->dev_pr_res_holder->isid_present_at_reg) { | 581 | if (dev->dev_pr_res_holder->isid_present_at_reg) { |
585 | if (dev->dev_pr_res_holder->pr_reg_bin_isid != | 582 | if (dev->dev_pr_res_holder->pr_reg_bin_isid != |
586 | sess->sess_bin_isid) { | 583 | sess->sess_bin_isid) { |
587 | pr_reg_type |= 0x80000000; | 584 | isid_mismatch = true; |
588 | goto check_nonholder; | 585 | goto check_nonholder; |
589 | } | 586 | } |
590 | } | 587 | } |
@@ -592,7 +589,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd) | |||
592 | return 0; | 589 | return 0; |
593 | 590 | ||
594 | check_nonholder: | 591 | check_nonholder: |
595 | if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type)) | 592 | if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, isid_mismatch)) |
596 | return TCM_RESERVATION_CONFLICT; | 593 | return TCM_RESERVATION_CONFLICT; |
597 | return 0; | 594 | return 0; |
598 | } | 595 | } |
@@ -620,7 +617,9 @@ static u32 core_scsi3_pr_generation(struct se_device *dev) | |||
620 | static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | 617 | static struct t10_pr_registration *__core_scsi3_do_alloc_registration( |
621 | struct se_device *dev, | 618 | struct se_device *dev, |
622 | struct se_node_acl *nacl, | 619 | struct se_node_acl *nacl, |
620 | struct se_lun *lun, | ||
623 | struct se_dev_entry *deve, | 621 | struct se_dev_entry *deve, |
622 | u64 mapped_lun, | ||
624 | unsigned char *isid, | 623 | unsigned char *isid, |
625 | u64 sa_res_key, | 624 | u64 sa_res_key, |
626 | int all_tg_pt, | 625 | int all_tg_pt, |
@@ -642,12 +641,12 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | |||
642 | atomic_set(&pr_reg->pr_res_holders, 0); | 641 | atomic_set(&pr_reg->pr_res_holders, 0); |
643 | pr_reg->pr_reg_nacl = nacl; | 642 | pr_reg->pr_reg_nacl = nacl; |
644 | pr_reg->pr_reg_deve = deve; | 643 | pr_reg->pr_reg_deve = deve; |
645 | pr_reg->pr_res_mapped_lun = deve->mapped_lun; | 644 | pr_reg->pr_res_mapped_lun = mapped_lun; |
646 | pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun; | 645 | pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; |
646 | pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; | ||
647 | pr_reg->pr_res_key = sa_res_key; | 647 | pr_reg->pr_res_key = sa_res_key; |
648 | pr_reg->pr_reg_all_tg_pt = all_tg_pt; | 648 | pr_reg->pr_reg_all_tg_pt = all_tg_pt; |
649 | pr_reg->pr_reg_aptpl = aptpl; | 649 | pr_reg->pr_reg_aptpl = aptpl; |
650 | pr_reg->pr_reg_tg_pt_lun = deve->se_lun; | ||
651 | /* | 650 | /* |
652 | * If an ISID value for this SCSI Initiator Port exists, | 651 | * If an ISID value for this SCSI Initiator Port exists, |
653 | * save it to the registration now. | 652 | * save it to the registration now. |
@@ -671,7 +670,9 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *); | |||
671 | static struct t10_pr_registration *__core_scsi3_alloc_registration( | 670 | static struct t10_pr_registration *__core_scsi3_alloc_registration( |
672 | struct se_device *dev, | 671 | struct se_device *dev, |
673 | struct se_node_acl *nacl, | 672 | struct se_node_acl *nacl, |
673 | struct se_lun *lun, | ||
674 | struct se_dev_entry *deve, | 674 | struct se_dev_entry *deve, |
675 | u64 mapped_lun, | ||
675 | unsigned char *isid, | 676 | unsigned char *isid, |
676 | u64 sa_res_key, | 677 | u64 sa_res_key, |
677 | int all_tg_pt, | 678 | int all_tg_pt, |
@@ -679,7 +680,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
679 | { | 680 | { |
680 | struct se_dev_entry *deve_tmp; | 681 | struct se_dev_entry *deve_tmp; |
681 | struct se_node_acl *nacl_tmp; | 682 | struct se_node_acl *nacl_tmp; |
682 | struct se_port *port, *port_tmp; | 683 | struct se_lun_acl *lacl_tmp; |
684 | struct se_lun *lun_tmp, *next, *dest_lun; | ||
683 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 685 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
684 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; | 686 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; |
685 | int ret; | 687 | int ret; |
@@ -687,8 +689,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
687 | * Create a registration for the I_T Nexus upon which the | 689 | * Create a registration for the I_T Nexus upon which the |
688 | * PROUT REGISTER was received. | 690 | * PROUT REGISTER was received. |
689 | */ | 691 | */ |
690 | pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, | 692 | pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun, |
691 | sa_res_key, all_tg_pt, aptpl); | 693 | isid, sa_res_key, all_tg_pt, |
694 | aptpl); | ||
692 | if (!pr_reg) | 695 | if (!pr_reg) |
693 | return NULL; | 696 | return NULL; |
694 | /* | 697 | /* |
@@ -701,13 +704,13 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
701 | * for ALL_TG_PT=1 | 704 | * for ALL_TG_PT=1 |
702 | */ | 705 | */ |
703 | spin_lock(&dev->se_port_lock); | 706 | spin_lock(&dev->se_port_lock); |
704 | list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { | 707 | list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) { |
705 | atomic_inc_mb(&port->sep_tg_pt_ref_cnt); | 708 | if (!percpu_ref_tryget_live(&lun_tmp->lun_ref)) |
709 | continue; | ||
706 | spin_unlock(&dev->se_port_lock); | 710 | spin_unlock(&dev->se_port_lock); |
707 | 711 | ||
708 | spin_lock_bh(&port->sep_alua_lock); | 712 | spin_lock(&lun_tmp->lun_deve_lock); |
709 | list_for_each_entry(deve_tmp, &port->sep_alua_list, | 713 | list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) { |
710 | alua_port_list) { | ||
711 | /* | 714 | /* |
712 | * This pointer will be NULL for demo mode MappedLUNs | 715 | * This pointer will be NULL for demo mode MappedLUNs |
713 | * that have not been make explicit via a ConfigFS | 716 | * that have not been make explicit via a ConfigFS |
@@ -716,7 +719,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
716 | if (!deve_tmp->se_lun_acl) | 719 | if (!deve_tmp->se_lun_acl) |
717 | continue; | 720 | continue; |
718 | 721 | ||
719 | nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; | 722 | lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl, |
723 | lockdep_is_held(&lun_tmp->lun_deve_lock)); | ||
724 | nacl_tmp = lacl_tmp->se_lun_nacl; | ||
720 | /* | 725 | /* |
721 | * Skip the matching struct se_node_acl that is allocated | 726 | * Skip the matching struct se_node_acl that is allocated |
722 | * above.. | 727 | * above.. |
@@ -736,8 +741,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
736 | if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) | 741 | if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) |
737 | continue; | 742 | continue; |
738 | 743 | ||
739 | atomic_inc_mb(&deve_tmp->pr_ref_count); | 744 | kref_get(&deve_tmp->pr_kref); |
740 | spin_unlock_bh(&port->sep_alua_lock); | 745 | spin_unlock(&lun_tmp->lun_deve_lock); |
741 | /* | 746 | /* |
742 | * Grab a configfs group dependency that is released | 747 | * Grab a configfs group dependency that is released |
743 | * for the exception path at label out: below, or upon | 748 | * for the exception path at label out: below, or upon |
@@ -748,8 +753,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
748 | if (ret < 0) { | 753 | if (ret < 0) { |
749 | pr_err("core_scsi3_lunacl_depend" | 754 | pr_err("core_scsi3_lunacl_depend" |
750 | "_item() failed\n"); | 755 | "_item() failed\n"); |
751 | atomic_dec_mb(&port->sep_tg_pt_ref_cnt); | 756 | percpu_ref_put(&lun_tmp->lun_ref); |
752 | atomic_dec_mb(&deve_tmp->pr_ref_count); | 757 | kref_put(&deve_tmp->pr_kref, target_pr_kref_release); |
753 | goto out; | 758 | goto out; |
754 | } | 759 | } |
755 | /* | 760 | /* |
@@ -759,24 +764,27 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
759 | * the original *pr_reg is processed in | 764 | * the original *pr_reg is processed in |
760 | * __core_scsi3_add_registration() | 765 | * __core_scsi3_add_registration() |
761 | */ | 766 | */ |
767 | dest_lun = rcu_dereference_check(deve_tmp->se_lun, | ||
768 | atomic_read(&deve_tmp->pr_kref.refcount) != 0); | ||
769 | |||
762 | pr_reg_atp = __core_scsi3_do_alloc_registration(dev, | 770 | pr_reg_atp = __core_scsi3_do_alloc_registration(dev, |
763 | nacl_tmp, deve_tmp, NULL, | 771 | nacl_tmp, dest_lun, deve_tmp, |
772 | deve_tmp->mapped_lun, NULL, | ||
764 | sa_res_key, all_tg_pt, aptpl); | 773 | sa_res_key, all_tg_pt, aptpl); |
765 | if (!pr_reg_atp) { | 774 | if (!pr_reg_atp) { |
766 | atomic_dec_mb(&port->sep_tg_pt_ref_cnt); | 775 | percpu_ref_put(&lun_tmp->lun_ref); |
767 | atomic_dec_mb(&deve_tmp->pr_ref_count); | ||
768 | core_scsi3_lunacl_undepend_item(deve_tmp); | 776 | core_scsi3_lunacl_undepend_item(deve_tmp); |
769 | goto out; | 777 | goto out; |
770 | } | 778 | } |
771 | 779 | ||
772 | list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, | 780 | list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, |
773 | &pr_reg->pr_reg_atp_list); | 781 | &pr_reg->pr_reg_atp_list); |
774 | spin_lock_bh(&port->sep_alua_lock); | 782 | spin_lock(&lun_tmp->lun_deve_lock); |
775 | } | 783 | } |
776 | spin_unlock_bh(&port->sep_alua_lock); | 784 | spin_unlock(&lun_tmp->lun_deve_lock); |
777 | 785 | ||
778 | spin_lock(&dev->se_port_lock); | 786 | spin_lock(&dev->se_port_lock); |
779 | atomic_dec_mb(&port->sep_tg_pt_ref_cnt); | 787 | percpu_ref_put(&lun_tmp->lun_ref); |
780 | } | 788 | } |
781 | spin_unlock(&dev->se_port_lock); | 789 | spin_unlock(&dev->se_port_lock); |
782 | 790 | ||
@@ -797,10 +805,10 @@ int core_scsi3_alloc_aptpl_registration( | |||
797 | u64 sa_res_key, | 805 | u64 sa_res_key, |
798 | unsigned char *i_port, | 806 | unsigned char *i_port, |
799 | unsigned char *isid, | 807 | unsigned char *isid, |
800 | u32 mapped_lun, | 808 | u64 mapped_lun, |
801 | unsigned char *t_port, | 809 | unsigned char *t_port, |
802 | u16 tpgt, | 810 | u16 tpgt, |
803 | u32 target_lun, | 811 | u64 target_lun, |
804 | int res_holder, | 812 | int res_holder, |
805 | int all_tg_pt, | 813 | int all_tg_pt, |
806 | u8 type) | 814 | u8 type) |
@@ -831,7 +839,6 @@ int core_scsi3_alloc_aptpl_registration( | |||
831 | pr_reg->pr_res_key = sa_res_key; | 839 | pr_reg->pr_res_key = sa_res_key; |
832 | pr_reg->pr_reg_all_tg_pt = all_tg_pt; | 840 | pr_reg->pr_reg_all_tg_pt = all_tg_pt; |
833 | pr_reg->pr_reg_aptpl = 1; | 841 | pr_reg->pr_reg_aptpl = 1; |
834 | pr_reg->pr_reg_tg_pt_lun = NULL; | ||
835 | pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */ | 842 | pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */ |
836 | pr_reg->pr_res_type = type; | 843 | pr_reg->pr_res_type = type; |
837 | /* | 844 | /* |
@@ -895,9 +902,9 @@ static int __core_scsi3_check_aptpl_registration( | |||
895 | struct se_device *dev, | 902 | struct se_device *dev, |
896 | struct se_portal_group *tpg, | 903 | struct se_portal_group *tpg, |
897 | struct se_lun *lun, | 904 | struct se_lun *lun, |
898 | u32 target_lun, | 905 | u64 target_lun, |
899 | struct se_node_acl *nacl, | 906 | struct se_node_acl *nacl, |
900 | struct se_dev_entry *deve) | 907 | u64 mapped_lun) |
901 | { | 908 | { |
902 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 909 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
903 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 910 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
@@ -925,14 +932,13 @@ static int __core_scsi3_check_aptpl_registration( | |||
925 | pr_reg_aptpl_list) { | 932 | pr_reg_aptpl_list) { |
926 | 933 | ||
927 | if (!strcmp(pr_reg->pr_iport, i_port) && | 934 | if (!strcmp(pr_reg->pr_iport, i_port) && |
928 | (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && | 935 | (pr_reg->pr_res_mapped_lun == mapped_lun) && |
929 | !(strcmp(pr_reg->pr_tport, t_port)) && | 936 | !(strcmp(pr_reg->pr_tport, t_port)) && |
930 | (pr_reg->pr_reg_tpgt == tpgt) && | 937 | (pr_reg->pr_reg_tpgt == tpgt) && |
931 | (pr_reg->pr_aptpl_target_lun == target_lun)) { | 938 | (pr_reg->pr_aptpl_target_lun == target_lun)) { |
932 | 939 | ||
933 | pr_reg->pr_reg_nacl = nacl; | 940 | pr_reg->pr_reg_nacl = nacl; |
934 | pr_reg->pr_reg_deve = deve; | 941 | pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; |
935 | pr_reg->pr_reg_tg_pt_lun = lun; | ||
936 | 942 | ||
937 | list_del(&pr_reg->pr_reg_aptpl_list); | 943 | list_del(&pr_reg->pr_reg_aptpl_list); |
938 | spin_unlock(&pr_tmpl->aptpl_reg_lock); | 944 | spin_unlock(&pr_tmpl->aptpl_reg_lock); |
@@ -967,15 +973,14 @@ int core_scsi3_check_aptpl_registration( | |||
967 | struct se_portal_group *tpg, | 973 | struct se_portal_group *tpg, |
968 | struct se_lun *lun, | 974 | struct se_lun *lun, |
969 | struct se_node_acl *nacl, | 975 | struct se_node_acl *nacl, |
970 | u32 mapped_lun) | 976 | u64 mapped_lun) |
971 | { | 977 | { |
972 | struct se_dev_entry *deve = nacl->device_list[mapped_lun]; | ||
973 | |||
974 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) | 978 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) |
975 | return 0; | 979 | return 0; |
976 | 980 | ||
977 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, | 981 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, |
978 | lun->unpacked_lun, nacl, deve); | 982 | lun->unpacked_lun, nacl, |
983 | mapped_lun); | ||
979 | } | 984 | } |
980 | 985 | ||
981 | static void __core_scsi3_dump_registration( | 986 | static void __core_scsi3_dump_registration( |
@@ -1009,10 +1014,6 @@ static void __core_scsi3_dump_registration( | |||
1009 | pr_reg->pr_reg_aptpl); | 1014 | pr_reg->pr_reg_aptpl); |
1010 | } | 1015 | } |
1011 | 1016 | ||
1012 | /* | ||
1013 | * this function can be called with struct se_device->dev_reservation_lock | ||
1014 | * when register_move = 1 | ||
1015 | */ | ||
1016 | static void __core_scsi3_add_registration( | 1017 | static void __core_scsi3_add_registration( |
1017 | struct se_device *dev, | 1018 | struct se_device *dev, |
1018 | struct se_node_acl *nacl, | 1019 | struct se_node_acl *nacl, |
@@ -1023,6 +1024,7 @@ static void __core_scsi3_add_registration( | |||
1023 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 1024 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
1024 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1025 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1025 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1026 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1027 | struct se_dev_entry *deve; | ||
1026 | 1028 | ||
1027 | /* | 1029 | /* |
1028 | * Increment PRgeneration counter for struct se_device upon a successful | 1030 | * Increment PRgeneration counter for struct se_device upon a successful |
@@ -1039,10 +1041,16 @@ static void __core_scsi3_add_registration( | |||
1039 | 1041 | ||
1040 | spin_lock(&pr_tmpl->registration_lock); | 1042 | spin_lock(&pr_tmpl->registration_lock); |
1041 | list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list); | 1043 | list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list); |
1042 | pr_reg->pr_reg_deve->def_pr_registered = 1; | ||
1043 | 1044 | ||
1044 | __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); | 1045 | __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); |
1045 | spin_unlock(&pr_tmpl->registration_lock); | 1046 | spin_unlock(&pr_tmpl->registration_lock); |
1047 | |||
1048 | rcu_read_lock(); | ||
1049 | deve = pr_reg->pr_reg_deve; | ||
1050 | if (deve) | ||
1051 | set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); | ||
1052 | rcu_read_unlock(); | ||
1053 | |||
1046 | /* | 1054 | /* |
1047 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. | 1055 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. |
1048 | */ | 1056 | */ |
@@ -1054,6 +1062,8 @@ static void __core_scsi3_add_registration( | |||
1054 | */ | 1062 | */ |
1055 | list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, | 1063 | list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, |
1056 | &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { | 1064 | &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { |
1065 | struct se_node_acl *nacl_tmp = pr_reg_tmp->pr_reg_nacl; | ||
1066 | |||
1057 | list_del(&pr_reg_tmp->pr_reg_atp_mem_list); | 1067 | list_del(&pr_reg_tmp->pr_reg_atp_mem_list); |
1058 | 1068 | ||
1059 | pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); | 1069 | pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); |
@@ -1061,12 +1071,17 @@ static void __core_scsi3_add_registration( | |||
1061 | spin_lock(&pr_tmpl->registration_lock); | 1071 | spin_lock(&pr_tmpl->registration_lock); |
1062 | list_add_tail(&pr_reg_tmp->pr_reg_list, | 1072 | list_add_tail(&pr_reg_tmp->pr_reg_list, |
1063 | &pr_tmpl->registration_list); | 1073 | &pr_tmpl->registration_list); |
1064 | pr_reg_tmp->pr_reg_deve->def_pr_registered = 1; | ||
1065 | 1074 | ||
1066 | __core_scsi3_dump_registration(tfo, dev, | 1075 | __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, |
1067 | pr_reg_tmp->pr_reg_nacl, pr_reg_tmp, | 1076 | register_type); |
1068 | register_type); | ||
1069 | spin_unlock(&pr_tmpl->registration_lock); | 1077 | spin_unlock(&pr_tmpl->registration_lock); |
1078 | |||
1079 | rcu_read_lock(); | ||
1080 | deve = pr_reg_tmp->pr_reg_deve; | ||
1081 | if (deve) | ||
1082 | set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); | ||
1083 | rcu_read_unlock(); | ||
1084 | |||
1070 | /* | 1085 | /* |
1071 | * Drop configfs group dependency reference from | 1086 | * Drop configfs group dependency reference from |
1072 | * __core_scsi3_alloc_registration() | 1087 | * __core_scsi3_alloc_registration() |
@@ -1078,7 +1093,9 @@ static void __core_scsi3_add_registration( | |||
1078 | static int core_scsi3_alloc_registration( | 1093 | static int core_scsi3_alloc_registration( |
1079 | struct se_device *dev, | 1094 | struct se_device *dev, |
1080 | struct se_node_acl *nacl, | 1095 | struct se_node_acl *nacl, |
1096 | struct se_lun *lun, | ||
1081 | struct se_dev_entry *deve, | 1097 | struct se_dev_entry *deve, |
1098 | u64 mapped_lun, | ||
1082 | unsigned char *isid, | 1099 | unsigned char *isid, |
1083 | u64 sa_res_key, | 1100 | u64 sa_res_key, |
1084 | int all_tg_pt, | 1101 | int all_tg_pt, |
@@ -1088,8 +1105,9 @@ static int core_scsi3_alloc_registration( | |||
1088 | { | 1105 | { |
1089 | struct t10_pr_registration *pr_reg; | 1106 | struct t10_pr_registration *pr_reg; |
1090 | 1107 | ||
1091 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, | 1108 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun, |
1092 | sa_res_key, all_tg_pt, aptpl); | 1109 | isid, sa_res_key, all_tg_pt, |
1110 | aptpl); | ||
1093 | if (!pr_reg) | 1111 | if (!pr_reg) |
1094 | return -EPERM; | 1112 | return -EPERM; |
1095 | 1113 | ||
@@ -1242,13 +1260,13 @@ static void __core_scsi3_free_registration( | |||
1242 | const struct target_core_fabric_ops *tfo = | 1260 | const struct target_core_fabric_ops *tfo = |
1243 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | 1261 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; |
1244 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1262 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1263 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; | ||
1264 | struct se_dev_entry *deve; | ||
1245 | char i_buf[PR_REG_ISID_ID_LEN]; | 1265 | char i_buf[PR_REG_ISID_ID_LEN]; |
1246 | 1266 | ||
1247 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 1267 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
1248 | core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); | 1268 | core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); |
1249 | 1269 | ||
1250 | pr_reg->pr_reg_deve->def_pr_registered = 0; | ||
1251 | pr_reg->pr_reg_deve->pr_res_key = 0; | ||
1252 | if (!list_empty(&pr_reg->pr_reg_list)) | 1270 | if (!list_empty(&pr_reg->pr_reg_list)) |
1253 | list_del(&pr_reg->pr_reg_list); | 1271 | list_del(&pr_reg->pr_reg_list); |
1254 | /* | 1272 | /* |
@@ -1257,6 +1275,8 @@ static void __core_scsi3_free_registration( | |||
1257 | */ | 1275 | */ |
1258 | if (dec_holders) | 1276 | if (dec_holders) |
1259 | core_scsi3_put_pr_reg(pr_reg); | 1277 | core_scsi3_put_pr_reg(pr_reg); |
1278 | |||
1279 | spin_unlock(&pr_tmpl->registration_lock); | ||
1260 | /* | 1280 | /* |
1261 | * Wait until all reference from any other I_T nexuses for this | 1281 | * Wait until all reference from any other I_T nexuses for this |
1262 | * *pr_reg have been released. Because list_del() is called above, | 1282 | * *pr_reg have been released. Because list_del() is called above, |
@@ -1264,13 +1284,18 @@ static void __core_scsi3_free_registration( | |||
1264 | * count back to zero, and we release *pr_reg. | 1284 | * count back to zero, and we release *pr_reg. |
1265 | */ | 1285 | */ |
1266 | while (atomic_read(&pr_reg->pr_res_holders) != 0) { | 1286 | while (atomic_read(&pr_reg->pr_res_holders) != 0) { |
1267 | spin_unlock(&pr_tmpl->registration_lock); | ||
1268 | pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", | 1287 | pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", |
1269 | tfo->get_fabric_name()); | 1288 | tfo->get_fabric_name()); |
1270 | cpu_relax(); | 1289 | cpu_relax(); |
1271 | spin_lock(&pr_tmpl->registration_lock); | ||
1272 | } | 1290 | } |
1273 | 1291 | ||
1292 | rcu_read_lock(); | ||
1293 | deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun); | ||
1294 | if (deve) | ||
1295 | clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); | ||
1296 | rcu_read_unlock(); | ||
1297 | |||
1298 | spin_lock(&pr_tmpl->registration_lock); | ||
1274 | pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" | 1299 | pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" |
1275 | " Node: %s%s\n", tfo->get_fabric_name(), | 1300 | " Node: %s%s\n", tfo->get_fabric_name(), |
1276 | pr_reg->pr_reg_nacl->initiatorname, | 1301 | pr_reg->pr_reg_nacl->initiatorname, |
@@ -1392,12 +1417,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) | |||
1392 | 1417 | ||
1393 | static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) | 1418 | static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) |
1394 | { | 1419 | { |
1395 | struct se_lun_acl *lun_acl = se_deve->se_lun_acl; | 1420 | struct se_lun_acl *lun_acl; |
1396 | struct se_node_acl *nacl; | 1421 | struct se_node_acl *nacl; |
1397 | struct se_portal_group *tpg; | 1422 | struct se_portal_group *tpg; |
1398 | /* | 1423 | /* |
1399 | * For nacl->dynamic_node_acl=1 | 1424 | * For nacl->dynamic_node_acl=1 |
1400 | */ | 1425 | */ |
1426 | lun_acl = rcu_dereference_check(se_deve->se_lun_acl, | ||
1427 | atomic_read(&se_deve->pr_kref.refcount) != 0); | ||
1401 | if (!lun_acl) | 1428 | if (!lun_acl) |
1402 | return 0; | 1429 | return 0; |
1403 | 1430 | ||
@@ -1409,21 +1436,23 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) | |||
1409 | 1436 | ||
1410 | static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | 1437 | static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) |
1411 | { | 1438 | { |
1412 | struct se_lun_acl *lun_acl = se_deve->se_lun_acl; | 1439 | struct se_lun_acl *lun_acl; |
1413 | struct se_node_acl *nacl; | 1440 | struct se_node_acl *nacl; |
1414 | struct se_portal_group *tpg; | 1441 | struct se_portal_group *tpg; |
1415 | /* | 1442 | /* |
1416 | * For nacl->dynamic_node_acl=1 | 1443 | * For nacl->dynamic_node_acl=1 |
1417 | */ | 1444 | */ |
1445 | lun_acl = rcu_dereference_check(se_deve->se_lun_acl, | ||
1446 | atomic_read(&se_deve->pr_kref.refcount) != 0); | ||
1418 | if (!lun_acl) { | 1447 | if (!lun_acl) { |
1419 | atomic_dec_mb(&se_deve->pr_ref_count); | 1448 | kref_put(&se_deve->pr_kref, target_pr_kref_release); |
1420 | return; | 1449 | return; |
1421 | } | 1450 | } |
1422 | nacl = lun_acl->se_lun_nacl; | 1451 | nacl = lun_acl->se_lun_nacl; |
1423 | tpg = nacl->se_tpg; | 1452 | tpg = nacl->se_tpg; |
1424 | 1453 | ||
1425 | target_undepend_item(&lun_acl->se_lun_group.cg_item); | 1454 | target_undepend_item(&lun_acl->se_lun_group.cg_item); |
1426 | atomic_dec_mb(&se_deve->pr_ref_count); | 1455 | kref_put(&se_deve->pr_kref, target_pr_kref_release); |
1427 | } | 1456 | } |
1428 | 1457 | ||
1429 | static sense_reason_t | 1458 | static sense_reason_t |
@@ -1436,30 +1465,25 @@ core_scsi3_decode_spec_i_port( | |||
1436 | int aptpl) | 1465 | int aptpl) |
1437 | { | 1466 | { |
1438 | struct se_device *dev = cmd->se_dev; | 1467 | struct se_device *dev = cmd->se_dev; |
1439 | struct se_port *tmp_port; | ||
1440 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; | 1468 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; |
1441 | struct se_session *se_sess = cmd->se_sess; | 1469 | struct se_session *se_sess = cmd->se_sess; |
1442 | struct se_node_acl *dest_node_acl = NULL; | 1470 | struct se_node_acl *dest_node_acl = NULL; |
1443 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; | 1471 | struct se_dev_entry *dest_se_deve = NULL; |
1444 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; | 1472 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; |
1445 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1473 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1446 | LIST_HEAD(tid_dest_list); | 1474 | LIST_HEAD(tid_dest_list); |
1447 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | 1475 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; |
1448 | const struct target_core_fabric_ops *tmp_tf_ops; | 1476 | unsigned char *buf, *ptr, proto_ident; |
1449 | unsigned char *buf; | 1477 | const unsigned char *i_str; |
1450 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | ||
1451 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; | 1478 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; |
1452 | sense_reason_t ret; | 1479 | sense_reason_t ret; |
1453 | u32 tpdl, tid_len = 0; | 1480 | u32 tpdl, tid_len = 0; |
1454 | int dest_local_nexus; | ||
1455 | u32 dest_rtpi = 0; | 1481 | u32 dest_rtpi = 0; |
1456 | 1482 | ||
1457 | local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
1458 | /* | 1483 | /* |
1459 | * Allocate a struct pr_transport_id_holder and setup the | 1484 | * Allocate a struct pr_transport_id_holder and setup the |
1460 | * local_node_acl and local_se_deve pointers and add to | 1485 | * local_node_acl pointer and add to struct list_head tid_dest_list |
1461 | * struct list_head tid_dest_list for add registration | 1486 | * for add registration processing in the loop of tid_dest_list below. |
1462 | * processing in the loop of tid_dest_list below. | ||
1463 | */ | 1487 | */ |
1464 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); | 1488 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); |
1465 | if (!tidh_new) { | 1489 | if (!tidh_new) { |
@@ -1469,10 +1493,10 @@ core_scsi3_decode_spec_i_port( | |||
1469 | INIT_LIST_HEAD(&tidh_new->dest_list); | 1493 | INIT_LIST_HEAD(&tidh_new->dest_list); |
1470 | tidh_new->dest_tpg = tpg; | 1494 | tidh_new->dest_tpg = tpg; |
1471 | tidh_new->dest_node_acl = se_sess->se_node_acl; | 1495 | tidh_new->dest_node_acl = se_sess->se_node_acl; |
1472 | tidh_new->dest_se_deve = local_se_deve; | ||
1473 | 1496 | ||
1474 | local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, | 1497 | local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, |
1475 | se_sess->se_node_acl, local_se_deve, l_isid, | 1498 | se_sess->se_node_acl, cmd->se_lun, |
1499 | NULL, cmd->orig_fe_lun, l_isid, | ||
1476 | sa_res_key, all_tg_pt, aptpl); | 1500 | sa_res_key, all_tg_pt, aptpl); |
1477 | if (!local_pr_reg) { | 1501 | if (!local_pr_reg) { |
1478 | kfree(tidh_new); | 1502 | kfree(tidh_new); |
@@ -1481,10 +1505,10 @@ core_scsi3_decode_spec_i_port( | |||
1481 | tidh_new->dest_pr_reg = local_pr_reg; | 1505 | tidh_new->dest_pr_reg = local_pr_reg; |
1482 | /* | 1506 | /* |
1483 | * The local I_T nexus does not hold any configfs dependances, | 1507 | * The local I_T nexus does not hold any configfs dependances, |
1484 | * so we set tid_h->dest_local_nexus=1 to prevent the | 1508 | * so we set tidh_new->dest_se_deve to NULL to prevent the |
1485 | * configfs_undepend_item() calls in the tid_dest_list loops below. | 1509 | * configfs_undepend_item() calls in the tid_dest_list loops below. |
1486 | */ | 1510 | */ |
1487 | tidh_new->dest_local_nexus = 1; | 1511 | tidh_new->dest_se_deve = NULL; |
1488 | list_add_tail(&tidh_new->dest_list, &tid_dest_list); | 1512 | list_add_tail(&tidh_new->dest_list, &tid_dest_list); |
1489 | 1513 | ||
1490 | if (cmd->data_length < 28) { | 1514 | if (cmd->data_length < 28) { |
@@ -1525,32 +1549,25 @@ core_scsi3_decode_spec_i_port( | |||
1525 | ptr = &buf[28]; | 1549 | ptr = &buf[28]; |
1526 | 1550 | ||
1527 | while (tpdl > 0) { | 1551 | while (tpdl > 0) { |
1552 | struct se_lun *dest_lun, *tmp_lun; | ||
1553 | |||
1528 | proto_ident = (ptr[0] & 0x0f); | 1554 | proto_ident = (ptr[0] & 0x0f); |
1529 | dest_tpg = NULL; | 1555 | dest_tpg = NULL; |
1530 | 1556 | ||
1531 | spin_lock(&dev->se_port_lock); | 1557 | spin_lock(&dev->se_port_lock); |
1532 | list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { | 1558 | list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) { |
1533 | tmp_tpg = tmp_port->sep_tpg; | 1559 | tmp_tpg = tmp_lun->lun_tpg; |
1534 | if (!tmp_tpg) | 1560 | |
1535 | continue; | ||
1536 | tmp_tf_ops = tmp_tpg->se_tpg_tfo; | ||
1537 | if (!tmp_tf_ops) | ||
1538 | continue; | ||
1539 | if (!tmp_tf_ops->get_fabric_proto_ident || | ||
1540 | !tmp_tf_ops->tpg_parse_pr_out_transport_id) | ||
1541 | continue; | ||
1542 | /* | 1561 | /* |
1543 | * Look for the matching proto_ident provided by | 1562 | * Look for the matching proto_ident provided by |
1544 | * the received TransportID | 1563 | * the received TransportID |
1545 | */ | 1564 | */ |
1546 | tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg); | 1565 | if (tmp_tpg->proto_id != proto_ident) |
1547 | if (tmp_proto_ident != proto_ident) | ||
1548 | continue; | 1566 | continue; |
1549 | dest_rtpi = tmp_port->sep_rtpi; | 1567 | dest_rtpi = tmp_lun->lun_rtpi; |
1550 | 1568 | ||
1551 | i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( | 1569 | i_str = target_parse_pr_out_transport_id(tmp_tpg, |
1552 | tmp_tpg, (const char *)ptr, &tid_len, | 1570 | (const char *)ptr, &tid_len, &iport_ptr); |
1553 | &iport_ptr); | ||
1554 | if (!i_str) | 1571 | if (!i_str) |
1555 | continue; | 1572 | continue; |
1556 | 1573 | ||
@@ -1569,12 +1586,12 @@ core_scsi3_decode_spec_i_port( | |||
1569 | * from the decoded fabric module specific TransportID | 1586 | * from the decoded fabric module specific TransportID |
1570 | * at *i_str. | 1587 | * at *i_str. |
1571 | */ | 1588 | */ |
1572 | spin_lock_irq(&tmp_tpg->acl_node_lock); | 1589 | mutex_lock(&tmp_tpg->acl_node_mutex); |
1573 | dest_node_acl = __core_tpg_get_initiator_node_acl( | 1590 | dest_node_acl = __core_tpg_get_initiator_node_acl( |
1574 | tmp_tpg, i_str); | 1591 | tmp_tpg, i_str); |
1575 | if (dest_node_acl) | 1592 | if (dest_node_acl) |
1576 | atomic_inc_mb(&dest_node_acl->acl_pr_ref_count); | 1593 | atomic_inc_mb(&dest_node_acl->acl_pr_ref_count); |
1577 | spin_unlock_irq(&tmp_tpg->acl_node_lock); | 1594 | mutex_unlock(&tmp_tpg->acl_node_mutex); |
1578 | 1595 | ||
1579 | if (!dest_node_acl) { | 1596 | if (!dest_node_acl) { |
1580 | core_scsi3_tpg_undepend_item(tmp_tpg); | 1597 | core_scsi3_tpg_undepend_item(tmp_tpg); |
@@ -1644,7 +1661,7 @@ core_scsi3_decode_spec_i_port( | |||
1644 | if (core_scsi3_lunacl_depend_item(dest_se_deve)) { | 1661 | if (core_scsi3_lunacl_depend_item(dest_se_deve)) { |
1645 | pr_err("core_scsi3_lunacl_depend_item()" | 1662 | pr_err("core_scsi3_lunacl_depend_item()" |
1646 | " failed\n"); | 1663 | " failed\n"); |
1647 | atomic_dec_mb(&dest_se_deve->pr_ref_count); | 1664 | kref_put(&dest_se_deve->pr_kref, target_pr_kref_release); |
1648 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1665 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1649 | core_scsi3_tpg_undepend_item(dest_tpg); | 1666 | core_scsi3_tpg_undepend_item(dest_tpg); |
1650 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1667 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -1652,7 +1669,7 @@ core_scsi3_decode_spec_i_port( | |||
1652 | } | 1669 | } |
1653 | 1670 | ||
1654 | pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" | 1671 | pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" |
1655 | " dest_se_deve mapped_lun: %u\n", | 1672 | " dest_se_deve mapped_lun: %llu\n", |
1656 | dest_tpg->se_tpg_tfo->get_fabric_name(), | 1673 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1657 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); | 1674 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); |
1658 | 1675 | ||
@@ -1708,9 +1725,13 @@ core_scsi3_decode_spec_i_port( | |||
1708 | * and then call __core_scsi3_add_registration() in the | 1725 | * and then call __core_scsi3_add_registration() in the |
1709 | * 2nd loop which will never fail. | 1726 | * 2nd loop which will never fail. |
1710 | */ | 1727 | */ |
1728 | dest_lun = rcu_dereference_check(dest_se_deve->se_lun, | ||
1729 | atomic_read(&dest_se_deve->pr_kref.refcount) != 0); | ||
1730 | |||
1711 | dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, | 1731 | dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, |
1712 | dest_node_acl, dest_se_deve, iport_ptr, | 1732 | dest_node_acl, dest_lun, dest_se_deve, |
1713 | sa_res_key, all_tg_pt, aptpl); | 1733 | dest_se_deve->mapped_lun, iport_ptr, |
1734 | sa_res_key, all_tg_pt, aptpl); | ||
1714 | if (!dest_pr_reg) { | 1735 | if (!dest_pr_reg) { |
1715 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1736 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
1716 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1737 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
@@ -1748,7 +1769,6 @@ core_scsi3_decode_spec_i_port( | |||
1748 | dest_node_acl = tidh->dest_node_acl; | 1769 | dest_node_acl = tidh->dest_node_acl; |
1749 | dest_se_deve = tidh->dest_se_deve; | 1770 | dest_se_deve = tidh->dest_se_deve; |
1750 | dest_pr_reg = tidh->dest_pr_reg; | 1771 | dest_pr_reg = tidh->dest_pr_reg; |
1751 | dest_local_nexus = tidh->dest_local_nexus; | ||
1752 | 1772 | ||
1753 | list_del(&tidh->dest_list); | 1773 | list_del(&tidh->dest_list); |
1754 | kfree(tidh); | 1774 | kfree(tidh); |
@@ -1761,10 +1781,11 @@ core_scsi3_decode_spec_i_port( | |||
1761 | 1781 | ||
1762 | pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" | 1782 | pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" |
1763 | " registered Transport ID for Node: %s%s Mapped LUN:" | 1783 | " registered Transport ID for Node: %s%s Mapped LUN:" |
1764 | " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), | 1784 | " %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(), |
1765 | dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun); | 1785 | dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? |
1786 | dest_se_deve->mapped_lun : 0); | ||
1766 | 1787 | ||
1767 | if (dest_local_nexus) | 1788 | if (!dest_se_deve) |
1768 | continue; | 1789 | continue; |
1769 | 1790 | ||
1770 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1791 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
@@ -1785,7 +1806,6 @@ out: | |||
1785 | dest_node_acl = tidh->dest_node_acl; | 1806 | dest_node_acl = tidh->dest_node_acl; |
1786 | dest_se_deve = tidh->dest_se_deve; | 1807 | dest_se_deve = tidh->dest_se_deve; |
1787 | dest_pr_reg = tidh->dest_pr_reg; | 1808 | dest_pr_reg = tidh->dest_pr_reg; |
1788 | dest_local_nexus = tidh->dest_local_nexus; | ||
1789 | 1809 | ||
1790 | list_del(&tidh->dest_list); | 1810 | list_del(&tidh->dest_list); |
1791 | kfree(tidh); | 1811 | kfree(tidh); |
@@ -1803,7 +1823,7 @@ out: | |||
1803 | 1823 | ||
1804 | kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); | 1824 | kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); |
1805 | 1825 | ||
1806 | if (dest_local_nexus) | 1826 | if (!dest_se_deve) |
1807 | continue; | 1827 | continue; |
1808 | 1828 | ||
1809 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1829 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
@@ -1818,7 +1838,6 @@ static int core_scsi3_update_aptpl_buf( | |||
1818 | unsigned char *buf, | 1838 | unsigned char *buf, |
1819 | u32 pr_aptpl_buf_len) | 1839 | u32 pr_aptpl_buf_len) |
1820 | { | 1840 | { |
1821 | struct se_lun *lun; | ||
1822 | struct se_portal_group *tpg; | 1841 | struct se_portal_group *tpg; |
1823 | struct t10_pr_registration *pr_reg; | 1842 | struct t10_pr_registration *pr_reg; |
1824 | unsigned char tmp[512], isid_buf[32]; | 1843 | unsigned char tmp[512], isid_buf[32]; |
@@ -1837,7 +1856,6 @@ static int core_scsi3_update_aptpl_buf( | |||
1837 | tmp[0] = '\0'; | 1856 | tmp[0] = '\0'; |
1838 | isid_buf[0] = '\0'; | 1857 | isid_buf[0] = '\0'; |
1839 | tpg = pr_reg->pr_reg_nacl->se_tpg; | 1858 | tpg = pr_reg->pr_reg_nacl->se_tpg; |
1840 | lun = pr_reg->pr_reg_tg_pt_lun; | ||
1841 | /* | 1859 | /* |
1842 | * Write out any ISID value to APTPL metadata that was included | 1860 | * Write out any ISID value to APTPL metadata that was included |
1843 | * in the original registration. | 1861 | * in the original registration. |
@@ -1856,7 +1874,7 @@ static int core_scsi3_update_aptpl_buf( | |||
1856 | "sa_res_key=%llu\n" | 1874 | "sa_res_key=%llu\n" |
1857 | "res_holder=1\nres_type=%02x\n" | 1875 | "res_holder=1\nres_type=%02x\n" |
1858 | "res_scope=%02x\nres_all_tg_pt=%d\n" | 1876 | "res_scope=%02x\nres_all_tg_pt=%d\n" |
1859 | "mapped_lun=%u\n", reg_count, | 1877 | "mapped_lun=%llu\n", reg_count, |
1860 | tpg->se_tpg_tfo->get_fabric_name(), | 1878 | tpg->se_tpg_tfo->get_fabric_name(), |
1861 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | 1879 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, |
1862 | pr_reg->pr_res_key, pr_reg->pr_res_type, | 1880 | pr_reg->pr_res_key, pr_reg->pr_res_type, |
@@ -1866,7 +1884,7 @@ static int core_scsi3_update_aptpl_buf( | |||
1866 | snprintf(tmp, 512, "PR_REG_START: %d\n" | 1884 | snprintf(tmp, 512, "PR_REG_START: %d\n" |
1867 | "initiator_fabric=%s\ninitiator_node=%s\n%s" | 1885 | "initiator_fabric=%s\ninitiator_node=%s\n%s" |
1868 | "sa_res_key=%llu\nres_holder=0\n" | 1886 | "sa_res_key=%llu\nres_holder=0\n" |
1869 | "res_all_tg_pt=%d\nmapped_lun=%u\n", | 1887 | "res_all_tg_pt=%d\nmapped_lun=%llu\n", |
1870 | reg_count, tpg->se_tpg_tfo->get_fabric_name(), | 1888 | reg_count, tpg->se_tpg_tfo->get_fabric_name(), |
1871 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | 1889 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, |
1872 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, | 1890 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, |
@@ -1885,11 +1903,12 @@ static int core_scsi3_update_aptpl_buf( | |||
1885 | * Include information about the associated SCSI target port. | 1903 | * Include information about the associated SCSI target port. |
1886 | */ | 1904 | */ |
1887 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" | 1905 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" |
1888 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" | 1906 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:" |
1889 | " %d\n", tpg->se_tpg_tfo->get_fabric_name(), | 1907 | " %d\n", tpg->se_tpg_tfo->get_fabric_name(), |
1890 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), | 1908 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1891 | tpg->se_tpg_tfo->tpg_get_tag(tpg), | 1909 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1892 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); | 1910 | pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun, |
1911 | reg_count); | ||
1893 | 1912 | ||
1894 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { | 1913 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1895 | pr_err("Unable to update renaming APTPL metadata," | 1914 | pr_err("Unable to update renaming APTPL metadata," |
@@ -2000,7 +2019,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2000 | { | 2019 | { |
2001 | struct se_session *se_sess = cmd->se_sess; | 2020 | struct se_session *se_sess = cmd->se_sess; |
2002 | struct se_device *dev = cmd->se_dev; | 2021 | struct se_device *dev = cmd->se_dev; |
2003 | struct se_dev_entry *se_deve; | ||
2004 | struct se_lun *se_lun = cmd->se_lun; | 2022 | struct se_lun *se_lun = cmd->se_lun; |
2005 | struct se_portal_group *se_tpg; | 2023 | struct se_portal_group *se_tpg; |
2006 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp; | 2024 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp; |
@@ -2014,7 +2032,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2014 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2032 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2015 | } | 2033 | } |
2016 | se_tpg = se_sess->se_tpg; | 2034 | se_tpg = se_sess->se_tpg; |
2017 | se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
2018 | 2035 | ||
2019 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { | 2036 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { |
2020 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); | 2037 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); |
@@ -2045,7 +2062,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2045 | * Logical Unit of the SCSI device server. | 2062 | * Logical Unit of the SCSI device server. |
2046 | */ | 2063 | */ |
2047 | if (core_scsi3_alloc_registration(cmd->se_dev, | 2064 | if (core_scsi3_alloc_registration(cmd->se_dev, |
2048 | se_sess->se_node_acl, se_deve, isid_ptr, | 2065 | se_sess->se_node_acl, cmd->se_lun, |
2066 | NULL, cmd->orig_fe_lun, isid_ptr, | ||
2049 | sa_res_key, all_tg_pt, aptpl, | 2067 | sa_res_key, all_tg_pt, aptpl, |
2050 | register_type, 0)) { | 2068 | register_type, 0)) { |
2051 | pr_err("Unable to allocate" | 2069 | pr_err("Unable to allocate" |
@@ -2066,7 +2084,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2066 | if (ret != 0) | 2084 | if (ret != 0) |
2067 | return ret; | 2085 | return ret; |
2068 | } | 2086 | } |
2069 | |||
2070 | return core_scsi3_update_and_write_aptpl(dev, aptpl); | 2087 | return core_scsi3_update_and_write_aptpl(dev, aptpl); |
2071 | } | 2088 | } |
2072 | 2089 | ||
@@ -2180,7 +2197,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2180 | &pr_tmpl->registration_list, | 2197 | &pr_tmpl->registration_list, |
2181 | pr_reg_list) { | 2198 | pr_reg_list) { |
2182 | 2199 | ||
2183 | core_scsi3_ua_allocate( | 2200 | target_ua_allocate_lun( |
2184 | pr_reg_p->pr_reg_nacl, | 2201 | pr_reg_p->pr_reg_nacl, |
2185 | pr_reg_p->pr_res_mapped_lun, | 2202 | pr_reg_p->pr_res_mapped_lun, |
2186 | 0x2A, | 2203 | 0x2A, |
@@ -2607,7 +2624,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, | |||
2607 | if (pr_reg_p == pr_reg) | 2624 | if (pr_reg_p == pr_reg) |
2608 | continue; | 2625 | continue; |
2609 | 2626 | ||
2610 | core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl, | 2627 | target_ua_allocate_lun(pr_reg_p->pr_reg_nacl, |
2611 | pr_reg_p->pr_res_mapped_lun, | 2628 | pr_reg_p->pr_res_mapped_lun, |
2612 | 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED); | 2629 | 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED); |
2613 | } | 2630 | } |
@@ -2630,7 +2647,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key) | |||
2630 | struct se_session *se_sess = cmd->se_sess; | 2647 | struct se_session *se_sess = cmd->se_sess; |
2631 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 2648 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
2632 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 2649 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
2633 | u32 pr_res_mapped_lun = 0; | 2650 | u64 pr_res_mapped_lun = 0; |
2634 | int calling_it_nexus = 0; | 2651 | int calling_it_nexus = 0; |
2635 | /* | 2652 | /* |
2636 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2653 | * Locate the existing *pr_reg via struct se_node_acl pointers |
@@ -2692,7 +2709,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key) | |||
2692 | * additional sense code set to RESERVATIONS PREEMPTED. | 2709 | * additional sense code set to RESERVATIONS PREEMPTED. |
2693 | */ | 2710 | */ |
2694 | if (!calling_it_nexus) | 2711 | if (!calling_it_nexus) |
2695 | core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, | 2712 | target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, |
2696 | 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); | 2713 | 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); |
2697 | } | 2714 | } |
2698 | spin_unlock(&pr_tmpl->registration_lock); | 2715 | spin_unlock(&pr_tmpl->registration_lock); |
@@ -2786,7 +2803,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, | |||
2786 | LIST_HEAD(preempt_and_abort_list); | 2803 | LIST_HEAD(preempt_and_abort_list); |
2787 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 2804 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
2788 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 2805 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
2789 | u32 pr_res_mapped_lun = 0; | 2806 | u64 pr_res_mapped_lun = 0; |
2790 | int all_reg = 0, calling_it_nexus = 0; | 2807 | int all_reg = 0, calling_it_nexus = 0; |
2791 | bool sa_res_key_unmatched = sa_res_key != 0; | 2808 | bool sa_res_key_unmatched = sa_res_key != 0; |
2792 | int prh_type = 0, prh_scope = 0; | 2809 | int prh_type = 0, prh_scope = 0; |
@@ -2901,7 +2918,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, | |||
2901 | NULL, 0); | 2918 | NULL, 0); |
2902 | } | 2919 | } |
2903 | if (!calling_it_nexus) | 2920 | if (!calling_it_nexus) |
2904 | core_scsi3_ua_allocate(pr_reg_nacl, | 2921 | target_ua_allocate_lun(pr_reg_nacl, |
2905 | pr_res_mapped_lun, 0x2A, | 2922 | pr_res_mapped_lun, 0x2A, |
2906 | ASCQ_2AH_REGISTRATIONS_PREEMPTED); | 2923 | ASCQ_2AH_REGISTRATIONS_PREEMPTED); |
2907 | } | 2924 | } |
@@ -3007,7 +3024,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, | |||
3007 | * persistent reservation and/or registration, with the | 3024 | * persistent reservation and/or registration, with the |
3008 | * additional sense code set to REGISTRATIONS PREEMPTED; | 3025 | * additional sense code set to REGISTRATIONS PREEMPTED; |
3009 | */ | 3026 | */ |
3010 | core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, | 3027 | target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A, |
3011 | ASCQ_2AH_REGISTRATIONS_PREEMPTED); | 3028 | ASCQ_2AH_REGISTRATIONS_PREEMPTED); |
3012 | } | 3029 | } |
3013 | spin_unlock(&pr_tmpl->registration_lock); | 3030 | spin_unlock(&pr_tmpl->registration_lock); |
@@ -3040,7 +3057,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, | |||
3040 | if (calling_it_nexus) | 3057 | if (calling_it_nexus) |
3041 | continue; | 3058 | continue; |
3042 | 3059 | ||
3043 | core_scsi3_ua_allocate(pr_reg->pr_reg_nacl, | 3060 | target_ua_allocate_lun(pr_reg->pr_reg_nacl, |
3044 | pr_reg->pr_res_mapped_lun, 0x2A, | 3061 | pr_reg->pr_res_mapped_lun, 0x2A, |
3045 | ASCQ_2AH_RESERVATIONS_RELEASED); | 3062 | ASCQ_2AH_RESERVATIONS_RELEASED); |
3046 | } | 3063 | } |
@@ -3099,15 +3116,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3099 | struct se_session *se_sess = cmd->se_sess; | 3116 | struct se_session *se_sess = cmd->se_sess; |
3100 | struct se_device *dev = cmd->se_dev; | 3117 | struct se_device *dev = cmd->se_dev; |
3101 | struct se_dev_entry *dest_se_deve = NULL; | 3118 | struct se_dev_entry *dest_se_deve = NULL; |
3102 | struct se_lun *se_lun = cmd->se_lun; | 3119 | struct se_lun *se_lun = cmd->se_lun, *tmp_lun; |
3103 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | 3120 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; |
3104 | struct se_port *se_port; | ||
3105 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; | 3121 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; |
3106 | const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; | 3122 | const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; |
3107 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; | 3123 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; |
3108 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 3124 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
3109 | unsigned char *buf; | 3125 | unsigned char *buf; |
3110 | unsigned char *initiator_str; | 3126 | const unsigned char *initiator_str; |
3111 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; | 3127 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; |
3112 | u32 tid_len, tmp_tid_len; | 3128 | u32 tid_len, tmp_tid_len; |
3113 | int new_reg = 0, type, scope, matching_iname; | 3129 | int new_reg = 0, type, scope, matching_iname; |
@@ -3186,12 +3202,10 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3186 | } | 3202 | } |
3187 | 3203 | ||
3188 | spin_lock(&dev->se_port_lock); | 3204 | spin_lock(&dev->se_port_lock); |
3189 | list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) { | 3205 | list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) { |
3190 | if (se_port->sep_rtpi != rtpi) | 3206 | if (tmp_lun->lun_rtpi != rtpi) |
3191 | continue; | ||
3192 | dest_se_tpg = se_port->sep_tpg; | ||
3193 | if (!dest_se_tpg) | ||
3194 | continue; | 3207 | continue; |
3208 | dest_se_tpg = tmp_lun->lun_tpg; | ||
3195 | dest_tf_ops = dest_se_tpg->se_tpg_tfo; | 3209 | dest_tf_ops = dest_se_tpg->se_tpg_tfo; |
3196 | if (!dest_tf_ops) | 3210 | if (!dest_tf_ops) |
3197 | continue; | 3211 | continue; |
@@ -3230,23 +3244,16 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3230 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" | 3244 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" |
3231 | " 0x%02x\n", proto_ident); | 3245 | " 0x%02x\n", proto_ident); |
3232 | 3246 | ||
3233 | if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { | 3247 | if (proto_ident != dest_se_tpg->proto_id) { |
3234 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" | 3248 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" |
3235 | " proto_ident: 0x%02x does not match ident: 0x%02x" | 3249 | " proto_ident: 0x%02x does not match ident: 0x%02x" |
3236 | " from fabric: %s\n", proto_ident, | 3250 | " from fabric: %s\n", proto_ident, |
3237 | dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), | 3251 | dest_se_tpg->proto_id, |
3238 | dest_tf_ops->get_fabric_name()); | 3252 | dest_tf_ops->get_fabric_name()); |
3239 | ret = TCM_INVALID_PARAMETER_LIST; | 3253 | ret = TCM_INVALID_PARAMETER_LIST; |
3240 | goto out; | 3254 | goto out; |
3241 | } | 3255 | } |
3242 | if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { | 3256 | initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, |
3243 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" | ||
3244 | " containg a valid tpg_parse_pr_out_transport_id" | ||
3245 | " function pointer\n"); | ||
3246 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
3247 | goto out; | ||
3248 | } | ||
3249 | initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, | ||
3250 | (const char *)&buf[24], &tmp_tid_len, &iport_ptr); | 3257 | (const char *)&buf[24], &tmp_tid_len, &iport_ptr); |
3251 | if (!initiator_str) { | 3258 | if (!initiator_str) { |
3252 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" | 3259 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" |
@@ -3295,12 +3302,12 @@ after_iport_check: | |||
3295 | /* | 3302 | /* |
3296 | * Locate the destination struct se_node_acl from the received Transport ID | 3303 | * Locate the destination struct se_node_acl from the received Transport ID |
3297 | */ | 3304 | */ |
3298 | spin_lock_irq(&dest_se_tpg->acl_node_lock); | 3305 | mutex_lock(&dest_se_tpg->acl_node_mutex); |
3299 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, | 3306 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, |
3300 | initiator_str); | 3307 | initiator_str); |
3301 | if (dest_node_acl) | 3308 | if (dest_node_acl) |
3302 | atomic_inc_mb(&dest_node_acl->acl_pr_ref_count); | 3309 | atomic_inc_mb(&dest_node_acl->acl_pr_ref_count); |
3303 | spin_unlock_irq(&dest_se_tpg->acl_node_lock); | 3310 | mutex_unlock(&dest_se_tpg->acl_node_mutex); |
3304 | 3311 | ||
3305 | if (!dest_node_acl) { | 3312 | if (!dest_node_acl) { |
3306 | pr_err("Unable to locate %s dest_node_acl for" | 3313 | pr_err("Unable to locate %s dest_node_acl for" |
@@ -3337,14 +3344,14 @@ after_iport_check: | |||
3337 | 3344 | ||
3338 | if (core_scsi3_lunacl_depend_item(dest_se_deve)) { | 3345 | if (core_scsi3_lunacl_depend_item(dest_se_deve)) { |
3339 | pr_err("core_scsi3_lunacl_depend_item() failed\n"); | 3346 | pr_err("core_scsi3_lunacl_depend_item() failed\n"); |
3340 | atomic_dec_mb(&dest_se_deve->pr_ref_count); | 3347 | kref_put(&dest_se_deve->pr_kref, target_pr_kref_release); |
3341 | dest_se_deve = NULL; | 3348 | dest_se_deve = NULL; |
3342 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3349 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3343 | goto out; | 3350 | goto out; |
3344 | } | 3351 | } |
3345 | 3352 | ||
3346 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" | 3353 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" |
3347 | " ACL for dest_se_deve->mapped_lun: %u\n", | 3354 | " ACL for dest_se_deve->mapped_lun: %llu\n", |
3348 | dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, | 3355 | dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, |
3349 | dest_se_deve->mapped_lun); | 3356 | dest_se_deve->mapped_lun); |
3350 | 3357 | ||
@@ -3421,13 +3428,17 @@ after_iport_check: | |||
3421 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | 3428 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, |
3422 | iport_ptr); | 3429 | iport_ptr); |
3423 | if (!dest_pr_reg) { | 3430 | if (!dest_pr_reg) { |
3424 | if (core_scsi3_alloc_registration(cmd->se_dev, | 3431 | struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun, |
3425 | dest_node_acl, dest_se_deve, iport_ptr, | 3432 | atomic_read(&dest_se_deve->pr_kref.refcount) != 0); |
3426 | sa_res_key, 0, aptpl, 2, 1)) { | 3433 | |
3427 | spin_unlock(&dev->dev_reservation_lock); | 3434 | spin_unlock(&dev->dev_reservation_lock); |
3435 | if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, | ||
3436 | dest_lun, dest_se_deve, dest_se_deve->mapped_lun, | ||
3437 | iport_ptr, sa_res_key, 0, aptpl, 2, 1)) { | ||
3428 | ret = TCM_INVALID_PARAMETER_LIST; | 3438 | ret = TCM_INVALID_PARAMETER_LIST; |
3429 | goto out; | 3439 | goto out; |
3430 | } | 3440 | } |
3441 | spin_lock(&dev->dev_reservation_lock); | ||
3431 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | 3442 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, |
3432 | iport_ptr); | 3443 | iport_ptr); |
3433 | new_reg = 1; | 3444 | new_reg = 1; |
@@ -3883,9 +3894,10 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3883 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 3894 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
3884 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 3895 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
3885 | unsigned char *buf; | 3896 | unsigned char *buf; |
3886 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; | 3897 | u32 add_desc_len = 0, add_len = 0; |
3887 | u32 off = 8; /* off into first Full Status descriptor */ | 3898 | u32 off = 8; /* off into first Full Status descriptor */ |
3888 | int format_code = 0, pr_res_type = 0, pr_res_scope = 0; | 3899 | int format_code = 0, pr_res_type = 0, pr_res_scope = 0; |
3900 | int exp_desc_len, desc_len; | ||
3889 | bool all_reg = false; | 3901 | bool all_reg = false; |
3890 | 3902 | ||
3891 | if (cmd->data_length < 8) { | 3903 | if (cmd->data_length < 8) { |
@@ -3930,10 +3942,10 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3930 | * Determine expected length of $FABRIC_MOD specific | 3942 | * Determine expected length of $FABRIC_MOD specific |
3931 | * TransportID full status descriptor.. | 3943 | * TransportID full status descriptor.. |
3932 | */ | 3944 | */ |
3933 | exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len( | 3945 | exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg, |
3934 | se_tpg, se_nacl, pr_reg, &format_code); | 3946 | &format_code); |
3935 | 3947 | if (exp_desc_len < 0 || | |
3936 | if ((exp_desc_len + add_len) > cmd->data_length) { | 3948 | exp_desc_len + add_len > cmd->data_length) { |
3937 | pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" | 3949 | pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" |
3938 | " out of buffer: %d\n", cmd->data_length); | 3950 | " out of buffer: %d\n", cmd->data_length); |
3939 | spin_lock(&pr_tmpl->registration_lock); | 3951 | spin_lock(&pr_tmpl->registration_lock); |
@@ -3990,21 +4002,26 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3990 | * IDENTIFIER field are not defined by this standard. | 4002 | * IDENTIFIER field are not defined by this standard. |
3991 | */ | 4003 | */ |
3992 | if (!pr_reg->pr_reg_all_tg_pt) { | 4004 | if (!pr_reg->pr_reg_all_tg_pt) { |
3993 | struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; | 4005 | u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi; |
3994 | 4006 | ||
3995 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | 4007 | buf[off++] = ((sep_rtpi >> 8) & 0xff); |
3996 | buf[off++] = (port->sep_rtpi & 0xff); | 4008 | buf[off++] = (sep_rtpi & 0xff); |
3997 | } else | 4009 | } else |
3998 | off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */ | 4010 | off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */ |
3999 | 4011 | ||
4012 | buf[off+4] = se_tpg->proto_id; | ||
4013 | |||
4000 | /* | 4014 | /* |
4001 | * Now, have the $FABRIC_MOD fill in the protocol identifier | 4015 | * Now, have the $FABRIC_MOD fill in the transport ID. |
4002 | */ | 4016 | */ |
4003 | desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg, | 4017 | desc_len = target_get_pr_transport_id(se_nacl, pr_reg, |
4004 | se_nacl, pr_reg, &format_code, &buf[off+4]); | 4018 | &format_code, &buf[off+4]); |
4005 | 4019 | ||
4006 | spin_lock(&pr_tmpl->registration_lock); | 4020 | spin_lock(&pr_tmpl->registration_lock); |
4007 | atomic_dec_mb(&pr_reg->pr_res_holders); | 4021 | atomic_dec_mb(&pr_reg->pr_res_holders); |
4022 | |||
4023 | if (desc_len < 0) | ||
4024 | break; | ||
4008 | /* | 4025 | /* |
4009 | * Set the ADDITIONAL DESCRIPTOR LENGTH | 4026 | * Set the ADDITIONAL DESCRIPTOR LENGTH |
4010 | */ | 4027 | */ |
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 749fd7bb7510..e3d26e9126a0 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h | |||
@@ -56,11 +56,11 @@ extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *); | |||
56 | extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *); | 56 | extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *); |
57 | extern int core_scsi3_alloc_aptpl_registration( | 57 | extern int core_scsi3_alloc_aptpl_registration( |
58 | struct t10_reservation *, u64, | 58 | struct t10_reservation *, u64, |
59 | unsigned char *, unsigned char *, u32, | 59 | unsigned char *, unsigned char *, u64, |
60 | unsigned char *, u16, u32, int, int, u8); | 60 | unsigned char *, u16, u64, int, int, u8); |
61 | extern int core_scsi3_check_aptpl_registration(struct se_device *, | 61 | extern int core_scsi3_check_aptpl_registration(struct se_device *, |
62 | struct se_portal_group *, struct se_lun *, | 62 | struct se_portal_group *, struct se_lun *, |
63 | struct se_node_acl *, u32); | 63 | struct se_node_acl *, u64); |
64 | extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, | 64 | extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, |
65 | struct se_node_acl *); | 65 | struct se_node_acl *); |
66 | extern void core_scsi3_free_all_registrations(struct se_device *); | 66 | extern void core_scsi3_free_all_registrations(struct se_device *); |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 26581e215141..08e9084ee615 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -42,9 +42,9 @@ | |||
42 | 42 | ||
43 | #include <target/target_core_base.h> | 43 | #include <target/target_core_base.h> |
44 | #include <target/target_core_backend.h> | 44 | #include <target/target_core_backend.h> |
45 | #include <target/target_core_backend_configfs.h> | ||
46 | 45 | ||
47 | #include "target_core_alua.h" | 46 | #include "target_core_alua.h" |
47 | #include "target_core_internal.h" | ||
48 | #include "target_core_pscsi.h" | 48 | #include "target_core_pscsi.h" |
49 | 49 | ||
50 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | 50 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) |
@@ -54,8 +54,6 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev) | |||
54 | return container_of(dev, struct pscsi_dev_virt, dev); | 54 | return container_of(dev, struct pscsi_dev_virt, dev); |
55 | } | 55 | } |
56 | 56 | ||
57 | static struct se_subsystem_api pscsi_template; | ||
58 | |||
59 | static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd); | 57 | static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd); |
60 | static void pscsi_req_done(struct request *, int); | 58 | static void pscsi_req_done(struct request *, int); |
61 | 59 | ||
@@ -80,7 +78,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | |||
80 | 78 | ||
81 | pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" | 79 | pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" |
82 | " Generic Target Core Stack %s\n", hba->hba_id, | 80 | " Generic Target Core Stack %s\n", hba->hba_id, |
83 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); | 81 | PSCSI_VERSION, TARGET_CORE_VERSION); |
84 | pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", | 82 | pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", |
85 | hba->hba_id); | 83 | hba->hba_id); |
86 | 84 | ||
@@ -579,6 +577,14 @@ static int pscsi_configure_device(struct se_device *dev) | |||
579 | return -ENODEV; | 577 | return -ENODEV; |
580 | } | 578 | } |
581 | 579 | ||
580 | static void pscsi_dev_call_rcu(struct rcu_head *p) | ||
581 | { | ||
582 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
583 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); | ||
584 | |||
585 | kfree(pdv); | ||
586 | } | ||
587 | |||
582 | static void pscsi_free_device(struct se_device *dev) | 588 | static void pscsi_free_device(struct se_device *dev) |
583 | { | 589 | { |
584 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); | 590 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); |
@@ -610,8 +616,7 @@ static void pscsi_free_device(struct se_device *dev) | |||
610 | 616 | ||
611 | pdv->pdv_sd = NULL; | 617 | pdv->pdv_sd = NULL; |
612 | } | 618 | } |
613 | 619 | call_rcu(&dev->rcu_head, pscsi_dev_call_rcu); | |
614 | kfree(pdv); | ||
615 | } | 620 | } |
616 | 621 | ||
617 | static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, | 622 | static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, |
@@ -635,12 +640,14 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, | |||
635 | * Hack to make sure that Write-Protect modepage is set if R/O mode is | 640 | * Hack to make sure that Write-Protect modepage is set if R/O mode is |
636 | * forced. | 641 | * forced. |
637 | */ | 642 | */ |
638 | if (!cmd->se_deve || !cmd->data_length) | 643 | if (!cmd->data_length) |
639 | goto after_mode_sense; | 644 | goto after_mode_sense; |
640 | 645 | ||
641 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | 646 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && |
642 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 647 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
643 | if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { | 648 | bool read_only = target_lun_is_rdonly(cmd); |
649 | |||
650 | if (read_only) { | ||
644 | unsigned char *buf; | 651 | unsigned char *buf; |
645 | 652 | ||
646 | buf = transport_kmap_data_sg(cmd); | 653 | buf = transport_kmap_data_sg(cmd); |
@@ -1116,27 +1123,7 @@ static void pscsi_req_done(struct request *req, int uptodate) | |||
1116 | kfree(pt); | 1123 | kfree(pt); |
1117 | } | 1124 | } |
1118 | 1125 | ||
1119 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type); | 1126 | static const struct target_backend_ops pscsi_ops = { |
1120 | TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type); | ||
1121 | |||
1122 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size); | ||
1123 | TB_DEV_ATTR_RO(pscsi, hw_block_size); | ||
1124 | |||
1125 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors); | ||
1126 | TB_DEV_ATTR_RO(pscsi, hw_max_sectors); | ||
1127 | |||
1128 | DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth); | ||
1129 | TB_DEV_ATTR_RO(pscsi, hw_queue_depth); | ||
1130 | |||
1131 | static struct configfs_attribute *pscsi_backend_dev_attrs[] = { | ||
1132 | &pscsi_dev_attrib_hw_pi_prot_type.attr, | ||
1133 | &pscsi_dev_attrib_hw_block_size.attr, | ||
1134 | &pscsi_dev_attrib_hw_max_sectors.attr, | ||
1135 | &pscsi_dev_attrib_hw_queue_depth.attr, | ||
1136 | NULL, | ||
1137 | }; | ||
1138 | |||
1139 | static struct se_subsystem_api pscsi_template = { | ||
1140 | .name = "pscsi", | 1127 | .name = "pscsi", |
1141 | .owner = THIS_MODULE, | 1128 | .owner = THIS_MODULE, |
1142 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, | 1129 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
@@ -1152,21 +1139,17 @@ static struct se_subsystem_api pscsi_template = { | |||
1152 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | 1139 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, |
1153 | .get_device_type = pscsi_get_device_type, | 1140 | .get_device_type = pscsi_get_device_type, |
1154 | .get_blocks = pscsi_get_blocks, | 1141 | .get_blocks = pscsi_get_blocks, |
1142 | .tb_dev_attrib_attrs = passthrough_attrib_attrs, | ||
1155 | }; | 1143 | }; |
1156 | 1144 | ||
1157 | static int __init pscsi_module_init(void) | 1145 | static int __init pscsi_module_init(void) |
1158 | { | 1146 | { |
1159 | struct target_backend_cits *tbc = &pscsi_template.tb_cits; | 1147 | return transport_backend_register(&pscsi_ops); |
1160 | |||
1161 | target_core_setup_sub_cits(&pscsi_template); | ||
1162 | tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs; | ||
1163 | |||
1164 | return transport_subsystem_register(&pscsi_template); | ||
1165 | } | 1148 | } |
1166 | 1149 | ||
1167 | static void __exit pscsi_module_exit(void) | 1150 | static void __exit pscsi_module_exit(void) |
1168 | { | 1151 | { |
1169 | transport_subsystem_release(&pscsi_template); | 1152 | target_backend_unregister(&pscsi_ops); |
1170 | } | 1153 | } |
1171 | 1154 | ||
1172 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); | 1155 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index b2d8f6f91633..4703f403f31c 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include <target/target_core_base.h> | 34 | #include <target/target_core_base.h> |
35 | #include <target/target_core_backend.h> | 35 | #include <target/target_core_backend.h> |
36 | #include <target/target_core_backend_configfs.h> | ||
37 | 36 | ||
38 | #include "target_core_rd.h" | 37 | #include "target_core_rd.h" |
39 | 38 | ||
@@ -42,10 +41,6 @@ static inline struct rd_dev *RD_DEV(struct se_device *dev) | |||
42 | return container_of(dev, struct rd_dev, dev); | 41 | return container_of(dev, struct rd_dev, dev); |
43 | } | 42 | } |
44 | 43 | ||
45 | /* rd_attach_hba(): (Part of se_subsystem_api_t template) | ||
46 | * | ||
47 | * | ||
48 | */ | ||
49 | static int rd_attach_hba(struct se_hba *hba, u32 host_id) | 44 | static int rd_attach_hba(struct se_hba *hba, u32 host_id) |
50 | { | 45 | { |
51 | struct rd_host *rd_host; | 46 | struct rd_host *rd_host; |
@@ -62,7 +57,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) | |||
62 | 57 | ||
63 | pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" | 58 | pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" |
64 | " Generic Target Core Stack %s\n", hba->hba_id, | 59 | " Generic Target Core Stack %s\n", hba->hba_id, |
65 | RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); | 60 | RD_HBA_VERSION, TARGET_CORE_VERSION); |
66 | 61 | ||
67 | return 0; | 62 | return 0; |
68 | } | 63 | } |
@@ -354,12 +349,20 @@ fail: | |||
354 | return ret; | 349 | return ret; |
355 | } | 350 | } |
356 | 351 | ||
352 | static void rd_dev_call_rcu(struct rcu_head *p) | ||
353 | { | ||
354 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
355 | struct rd_dev *rd_dev = RD_DEV(dev); | ||
356 | |||
357 | kfree(rd_dev); | ||
358 | } | ||
359 | |||
357 | static void rd_free_device(struct se_device *dev) | 360 | static void rd_free_device(struct se_device *dev) |
358 | { | 361 | { |
359 | struct rd_dev *rd_dev = RD_DEV(dev); | 362 | struct rd_dev *rd_dev = RD_DEV(dev); |
360 | 363 | ||
361 | rd_release_device_space(rd_dev); | 364 | rd_release_device_space(rd_dev); |
362 | kfree(rd_dev); | 365 | call_rcu(&dev->rcu_head, rd_dev_call_rcu); |
363 | } | 366 | } |
364 | 367 | ||
365 | static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | 368 | static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) |
@@ -402,10 +405,7 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page | |||
402 | return NULL; | 405 | return NULL; |
403 | } | 406 | } |
404 | 407 | ||
405 | typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int, | 408 | static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) |
406 | unsigned int, struct scatterlist *, int); | ||
407 | |||
408 | static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify) | ||
409 | { | 409 | { |
410 | struct se_device *se_dev = cmd->se_dev; | 410 | struct se_device *se_dev = cmd->se_dev; |
411 | struct rd_dev *dev = RD_DEV(se_dev); | 411 | struct rd_dev *dev = RD_DEV(se_dev); |
@@ -465,7 +465,16 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify) | |||
465 | 465 | ||
466 | #endif /* !CONFIG_ARCH_HAS_SG_CHAIN */ | 466 | #endif /* !CONFIG_ARCH_HAS_SG_CHAIN */ |
467 | 467 | ||
468 | rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset); | 468 | if (is_read) |
469 | rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, | ||
470 | prot_sg, prot_offset); | ||
471 | else | ||
472 | rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, | ||
473 | cmd->t_prot_sg, 0); | ||
474 | |||
475 | if (!rc) | ||
476 | sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); | ||
477 | |||
469 | if (need_to_release) | 478 | if (need_to_release) |
470 | kfree(prot_sg); | 479 | kfree(prot_sg); |
471 | 480 | ||
@@ -511,7 +520,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
511 | 520 | ||
512 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && | 521 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && |
513 | data_direction == DMA_TO_DEVICE) { | 522 | data_direction == DMA_TO_DEVICE) { |
514 | rc = rd_do_prot_rw(cmd, sbc_dif_verify_write); | 523 | rc = rd_do_prot_rw(cmd, false); |
515 | if (rc) | 524 | if (rc) |
516 | return rc; | 525 | return rc; |
517 | } | 526 | } |
@@ -579,7 +588,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
579 | 588 | ||
580 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && | 589 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && |
581 | data_direction == DMA_FROM_DEVICE) { | 590 | data_direction == DMA_FROM_DEVICE) { |
582 | rc = rd_do_prot_rw(cmd, sbc_dif_verify_read); | 591 | rc = rd_do_prot_rw(cmd, true); |
583 | if (rc) | 592 | if (rc) |
584 | return rc; | 593 | return rc; |
585 | } | 594 | } |
@@ -693,42 +702,7 @@ rd_parse_cdb(struct se_cmd *cmd) | |||
693 | return sbc_parse_cdb(cmd, &rd_sbc_ops); | 702 | return sbc_parse_cdb(cmd, &rd_sbc_ops); |
694 | } | 703 | } |
695 | 704 | ||
696 | DEF_TB_DEFAULT_ATTRIBS(rd_mcp); | 705 | static const struct target_backend_ops rd_mcp_ops = { |
697 | |||
698 | static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = { | ||
699 | &rd_mcp_dev_attrib_emulate_model_alias.attr, | ||
700 | &rd_mcp_dev_attrib_emulate_dpo.attr, | ||
701 | &rd_mcp_dev_attrib_emulate_fua_write.attr, | ||
702 | &rd_mcp_dev_attrib_emulate_fua_read.attr, | ||
703 | &rd_mcp_dev_attrib_emulate_write_cache.attr, | ||
704 | &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
705 | &rd_mcp_dev_attrib_emulate_tas.attr, | ||
706 | &rd_mcp_dev_attrib_emulate_tpu.attr, | ||
707 | &rd_mcp_dev_attrib_emulate_tpws.attr, | ||
708 | &rd_mcp_dev_attrib_emulate_caw.attr, | ||
709 | &rd_mcp_dev_attrib_emulate_3pc.attr, | ||
710 | &rd_mcp_dev_attrib_pi_prot_type.attr, | ||
711 | &rd_mcp_dev_attrib_hw_pi_prot_type.attr, | ||
712 | &rd_mcp_dev_attrib_pi_prot_format.attr, | ||
713 | &rd_mcp_dev_attrib_enforce_pr_isids.attr, | ||
714 | &rd_mcp_dev_attrib_is_nonrot.attr, | ||
715 | &rd_mcp_dev_attrib_emulate_rest_reord.attr, | ||
716 | &rd_mcp_dev_attrib_force_pr_aptpl.attr, | ||
717 | &rd_mcp_dev_attrib_hw_block_size.attr, | ||
718 | &rd_mcp_dev_attrib_block_size.attr, | ||
719 | &rd_mcp_dev_attrib_hw_max_sectors.attr, | ||
720 | &rd_mcp_dev_attrib_optimal_sectors.attr, | ||
721 | &rd_mcp_dev_attrib_hw_queue_depth.attr, | ||
722 | &rd_mcp_dev_attrib_queue_depth.attr, | ||
723 | &rd_mcp_dev_attrib_max_unmap_lba_count.attr, | ||
724 | &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr, | ||
725 | &rd_mcp_dev_attrib_unmap_granularity.attr, | ||
726 | &rd_mcp_dev_attrib_unmap_granularity_alignment.attr, | ||
727 | &rd_mcp_dev_attrib_max_write_same_len.attr, | ||
728 | NULL, | ||
729 | }; | ||
730 | |||
731 | static struct se_subsystem_api rd_mcp_template = { | ||
732 | .name = "rd_mcp", | 706 | .name = "rd_mcp", |
733 | .inquiry_prod = "RAMDISK-MCP", | 707 | .inquiry_prod = "RAMDISK-MCP", |
734 | .inquiry_rev = RD_MCP_VERSION, | 708 | .inquiry_rev = RD_MCP_VERSION, |
@@ -744,25 +718,15 @@ static struct se_subsystem_api rd_mcp_template = { | |||
744 | .get_blocks = rd_get_blocks, | 718 | .get_blocks = rd_get_blocks, |
745 | .init_prot = rd_init_prot, | 719 | .init_prot = rd_init_prot, |
746 | .free_prot = rd_free_prot, | 720 | .free_prot = rd_free_prot, |
721 | .tb_dev_attrib_attrs = sbc_attrib_attrs, | ||
747 | }; | 722 | }; |
748 | 723 | ||
749 | int __init rd_module_init(void) | 724 | int __init rd_module_init(void) |
750 | { | 725 | { |
751 | struct target_backend_cits *tbc = &rd_mcp_template.tb_cits; | 726 | return transport_backend_register(&rd_mcp_ops); |
752 | int ret; | ||
753 | |||
754 | target_core_setup_sub_cits(&rd_mcp_template); | ||
755 | tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs; | ||
756 | |||
757 | ret = transport_subsystem_register(&rd_mcp_template); | ||
758 | if (ret < 0) { | ||
759 | return ret; | ||
760 | } | ||
761 | |||
762 | return 0; | ||
763 | } | 727 | } |
764 | 728 | ||
765 | void rd_module_exit(void) | 729 | void rd_module_exit(void) |
766 | { | 730 | { |
767 | transport_subsystem_release(&rd_mcp_template); | 731 | target_backend_unregister(&rd_mcp_ops); |
768 | } | 732 | } |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 43719b393ca9..e318ddbe15da 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -38,6 +38,7 @@ | |||
38 | 38 | ||
39 | static sense_reason_t | 39 | static sense_reason_t |
40 | sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); | 40 | sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); |
41 | static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); | ||
41 | 42 | ||
42 | static sense_reason_t | 43 | static sense_reason_t |
43 | sbc_emulate_readcapacity(struct se_cmd *cmd) | 44 | sbc_emulate_readcapacity(struct se_cmd *cmd) |
@@ -177,6 +178,23 @@ sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) | |||
177 | EXPORT_SYMBOL(sbc_get_write_same_sectors); | 178 | EXPORT_SYMBOL(sbc_get_write_same_sectors); |
178 | 179 | ||
179 | static sense_reason_t | 180 | static sense_reason_t |
181 | sbc_execute_write_same_unmap(struct se_cmd *cmd) | ||
182 | { | ||
183 | struct sbc_ops *ops = cmd->protocol_data; | ||
184 | sector_t nolb = sbc_get_write_same_sectors(cmd); | ||
185 | sense_reason_t ret; | ||
186 | |||
187 | if (nolb) { | ||
188 | ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb); | ||
189 | if (ret) | ||
190 | return ret; | ||
191 | } | ||
192 | |||
193 | target_complete_cmd(cmd, GOOD); | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static sense_reason_t | ||
180 | sbc_emulate_noop(struct se_cmd *cmd) | 198 | sbc_emulate_noop(struct se_cmd *cmd) |
181 | { | 199 | { |
182 | target_complete_cmd(cmd, GOOD); | 200 | target_complete_cmd(cmd, GOOD); |
@@ -299,7 +317,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o | |||
299 | * translated into block discard requests within backend code. | 317 | * translated into block discard requests within backend code. |
300 | */ | 318 | */ |
301 | if (flags[0] & 0x08) { | 319 | if (flags[0] & 0x08) { |
302 | if (!ops->execute_write_same_unmap) | 320 | if (!ops->execute_unmap) |
303 | return TCM_UNSUPPORTED_SCSI_OPCODE; | 321 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
304 | 322 | ||
305 | if (!dev->dev_attrib.emulate_tpws) { | 323 | if (!dev->dev_attrib.emulate_tpws) { |
@@ -307,7 +325,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o | |||
307 | " has emulate_tpws disabled\n"); | 325 | " has emulate_tpws disabled\n"); |
308 | return TCM_UNSUPPORTED_SCSI_OPCODE; | 326 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
309 | } | 327 | } |
310 | cmd->execute_cmd = ops->execute_write_same_unmap; | 328 | cmd->execute_cmd = sbc_execute_write_same_unmap; |
311 | return 0; | 329 | return 0; |
312 | } | 330 | } |
313 | if (!ops->execute_write_same) | 331 | if (!ops->execute_write_same) |
@@ -381,7 +399,9 @@ out: | |||
381 | static sense_reason_t | 399 | static sense_reason_t |
382 | sbc_execute_rw(struct se_cmd *cmd) | 400 | sbc_execute_rw(struct se_cmd *cmd) |
383 | { | 401 | { |
384 | return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, | 402 | struct sbc_ops *ops = cmd->protocol_data; |
403 | |||
404 | return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, | ||
385 | cmd->data_direction); | 405 | cmd->data_direction); |
386 | } | 406 | } |
387 | 407 | ||
@@ -560,6 +580,7 @@ out: | |||
560 | static sense_reason_t | 580 | static sense_reason_t |
561 | sbc_compare_and_write(struct se_cmd *cmd) | 581 | sbc_compare_and_write(struct se_cmd *cmd) |
562 | { | 582 | { |
583 | struct sbc_ops *ops = cmd->protocol_data; | ||
563 | struct se_device *dev = cmd->se_dev; | 584 | struct se_device *dev = cmd->se_dev; |
564 | sense_reason_t ret; | 585 | sense_reason_t ret; |
565 | int rc; | 586 | int rc; |
@@ -579,7 +600,7 @@ sbc_compare_and_write(struct se_cmd *cmd) | |||
579 | */ | 600 | */ |
580 | cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; | 601 | cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; |
581 | 602 | ||
582 | ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, | 603 | ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, |
583 | DMA_FROM_DEVICE); | 604 | DMA_FROM_DEVICE); |
584 | if (ret) { | 605 | if (ret) { |
585 | cmd->transport_complete_callback = NULL; | 606 | cmd->transport_complete_callback = NULL; |
@@ -738,14 +759,15 @@ static int | |||
738 | sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) | 759 | sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) |
739 | { | 760 | { |
740 | if (cdb[1] & 0x10) { | 761 | if (cdb[1] & 0x10) { |
741 | if (!dev->dev_attrib.emulate_dpo) { | 762 | /* see explanation in spc_emulate_modesense */ |
763 | if (!target_check_fua(dev)) { | ||
742 | pr_err("Got CDB: 0x%02x with DPO bit set, but device" | 764 | pr_err("Got CDB: 0x%02x with DPO bit set, but device" |
743 | " does not advertise support for DPO\n", cdb[0]); | 765 | " does not advertise support for DPO\n", cdb[0]); |
744 | return -EINVAL; | 766 | return -EINVAL; |
745 | } | 767 | } |
746 | } | 768 | } |
747 | if (cdb[1] & 0x8) { | 769 | if (cdb[1] & 0x8) { |
748 | if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) { | 770 | if (!target_check_fua(dev)) { |
749 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" | 771 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" |
750 | " does not advertise support for FUA write\n", | 772 | " does not advertise support for FUA write\n", |
751 | cdb[0]); | 773 | cdb[0]); |
@@ -765,12 +787,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
765 | u32 sectors = 0; | 787 | u32 sectors = 0; |
766 | sense_reason_t ret; | 788 | sense_reason_t ret; |
767 | 789 | ||
790 | cmd->protocol_data = ops; | ||
791 | |||
768 | switch (cdb[0]) { | 792 | switch (cdb[0]) { |
769 | case READ_6: | 793 | case READ_6: |
770 | sectors = transport_get_sectors_6(cdb); | 794 | sectors = transport_get_sectors_6(cdb); |
771 | cmd->t_task_lba = transport_lba_21(cdb); | 795 | cmd->t_task_lba = transport_lba_21(cdb); |
772 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 796 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
773 | cmd->execute_rw = ops->execute_rw; | ||
774 | cmd->execute_cmd = sbc_execute_rw; | 797 | cmd->execute_cmd = sbc_execute_rw; |
775 | break; | 798 | break; |
776 | case READ_10: | 799 | case READ_10: |
@@ -785,7 +808,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
785 | return ret; | 808 | return ret; |
786 | 809 | ||
787 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 810 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
788 | cmd->execute_rw = ops->execute_rw; | ||
789 | cmd->execute_cmd = sbc_execute_rw; | 811 | cmd->execute_cmd = sbc_execute_rw; |
790 | break; | 812 | break; |
791 | case READ_12: | 813 | case READ_12: |
@@ -800,7 +822,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
800 | return ret; | 822 | return ret; |
801 | 823 | ||
802 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 824 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
803 | cmd->execute_rw = ops->execute_rw; | ||
804 | cmd->execute_cmd = sbc_execute_rw; | 825 | cmd->execute_cmd = sbc_execute_rw; |
805 | break; | 826 | break; |
806 | case READ_16: | 827 | case READ_16: |
@@ -815,14 +836,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
815 | return ret; | 836 | return ret; |
816 | 837 | ||
817 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 838 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
818 | cmd->execute_rw = ops->execute_rw; | ||
819 | cmd->execute_cmd = sbc_execute_rw; | 839 | cmd->execute_cmd = sbc_execute_rw; |
820 | break; | 840 | break; |
821 | case WRITE_6: | 841 | case WRITE_6: |
822 | sectors = transport_get_sectors_6(cdb); | 842 | sectors = transport_get_sectors_6(cdb); |
823 | cmd->t_task_lba = transport_lba_21(cdb); | 843 | cmd->t_task_lba = transport_lba_21(cdb); |
824 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 844 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
825 | cmd->execute_rw = ops->execute_rw; | ||
826 | cmd->execute_cmd = sbc_execute_rw; | 845 | cmd->execute_cmd = sbc_execute_rw; |
827 | break; | 846 | break; |
828 | case WRITE_10: | 847 | case WRITE_10: |
@@ -838,7 +857,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
838 | return ret; | 857 | return ret; |
839 | 858 | ||
840 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 859 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
841 | cmd->execute_rw = ops->execute_rw; | ||
842 | cmd->execute_cmd = sbc_execute_rw; | 860 | cmd->execute_cmd = sbc_execute_rw; |
843 | break; | 861 | break; |
844 | case WRITE_12: | 862 | case WRITE_12: |
@@ -853,7 +871,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
853 | return ret; | 871 | return ret; |
854 | 872 | ||
855 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 873 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
856 | cmd->execute_rw = ops->execute_rw; | ||
857 | cmd->execute_cmd = sbc_execute_rw; | 874 | cmd->execute_cmd = sbc_execute_rw; |
858 | break; | 875 | break; |
859 | case WRITE_16: | 876 | case WRITE_16: |
@@ -868,7 +885,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
868 | return ret; | 885 | return ret; |
869 | 886 | ||
870 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 887 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
871 | cmd->execute_rw = ops->execute_rw; | ||
872 | cmd->execute_cmd = sbc_execute_rw; | 888 | cmd->execute_cmd = sbc_execute_rw; |
873 | break; | 889 | break; |
874 | case XDWRITEREAD_10: | 890 | case XDWRITEREAD_10: |
@@ -886,7 +902,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
886 | /* | 902 | /* |
887 | * Setup BIDI XOR callback to be run after I/O completion. | 903 | * Setup BIDI XOR callback to be run after I/O completion. |
888 | */ | 904 | */ |
889 | cmd->execute_rw = ops->execute_rw; | ||
890 | cmd->execute_cmd = sbc_execute_rw; | 905 | cmd->execute_cmd = sbc_execute_rw; |
891 | cmd->transport_complete_callback = &xdreadwrite_callback; | 906 | cmd->transport_complete_callback = &xdreadwrite_callback; |
892 | break; | 907 | break; |
@@ -910,7 +925,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
910 | * Setup BIDI XOR callback to be run during after I/O | 925 | * Setup BIDI XOR callback to be run during after I/O |
911 | * completion. | 926 | * completion. |
912 | */ | 927 | */ |
913 | cmd->execute_rw = ops->execute_rw; | ||
914 | cmd->execute_cmd = sbc_execute_rw; | 928 | cmd->execute_cmd = sbc_execute_rw; |
915 | cmd->transport_complete_callback = &xdreadwrite_callback; | 929 | cmd->transport_complete_callback = &xdreadwrite_callback; |
916 | break; | 930 | break; |
@@ -954,7 +968,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
954 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); | 968 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
955 | cmd->t_task_nolb = sectors; | 969 | cmd->t_task_nolb = sectors; |
956 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; | 970 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; |
957 | cmd->execute_rw = ops->execute_rw; | ||
958 | cmd->execute_cmd = sbc_compare_and_write; | 971 | cmd->execute_cmd = sbc_compare_and_write; |
959 | cmd->transport_complete_callback = compare_and_write_callback; | 972 | cmd->transport_complete_callback = compare_and_write_callback; |
960 | break; | 973 | break; |
@@ -1004,7 +1017,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
1004 | return TCM_UNSUPPORTED_SCSI_OPCODE; | 1017 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
1005 | } | 1018 | } |
1006 | size = get_unaligned_be16(&cdb[7]); | 1019 | size = get_unaligned_be16(&cdb[7]); |
1007 | cmd->execute_cmd = ops->execute_unmap; | 1020 | cmd->execute_cmd = sbc_execute_unmap; |
1008 | break; | 1021 | break; |
1009 | case WRITE_SAME_16: | 1022 | case WRITE_SAME_16: |
1010 | sectors = transport_get_sectors_16(cdb); | 1023 | sectors = transport_get_sectors_16(cdb); |
@@ -1092,12 +1105,10 @@ u32 sbc_get_device_type(struct se_device *dev) | |||
1092 | } | 1105 | } |
1093 | EXPORT_SYMBOL(sbc_get_device_type); | 1106 | EXPORT_SYMBOL(sbc_get_device_type); |
1094 | 1107 | ||
1095 | sense_reason_t | 1108 | static sense_reason_t |
1096 | sbc_execute_unmap(struct se_cmd *cmd, | 1109 | sbc_execute_unmap(struct se_cmd *cmd) |
1097 | sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *, | ||
1098 | sector_t, sector_t), | ||
1099 | void *priv) | ||
1100 | { | 1110 | { |
1111 | struct sbc_ops *ops = cmd->protocol_data; | ||
1101 | struct se_device *dev = cmd->se_dev; | 1112 | struct se_device *dev = cmd->se_dev; |
1102 | unsigned char *buf, *ptr = NULL; | 1113 | unsigned char *buf, *ptr = NULL; |
1103 | sector_t lba; | 1114 | sector_t lba; |
@@ -1161,7 +1172,7 @@ sbc_execute_unmap(struct se_cmd *cmd, | |||
1161 | goto err; | 1172 | goto err; |
1162 | } | 1173 | } |
1163 | 1174 | ||
1164 | ret = do_unmap_fn(cmd, priv, lba, range); | 1175 | ret = ops->execute_unmap(cmd, lba, range); |
1165 | if (ret) | 1176 | if (ret) |
1166 | goto err; | 1177 | goto err; |
1167 | 1178 | ||
@@ -1175,34 +1186,56 @@ err: | |||
1175 | target_complete_cmd(cmd, GOOD); | 1186 | target_complete_cmd(cmd, GOOD); |
1176 | return ret; | 1187 | return ret; |
1177 | } | 1188 | } |
1178 | EXPORT_SYMBOL(sbc_execute_unmap); | ||
1179 | 1189 | ||
1180 | void | 1190 | void |
1181 | sbc_dif_generate(struct se_cmd *cmd) | 1191 | sbc_dif_generate(struct se_cmd *cmd) |
1182 | { | 1192 | { |
1183 | struct se_device *dev = cmd->se_dev; | 1193 | struct se_device *dev = cmd->se_dev; |
1184 | struct se_dif_v1_tuple *sdt; | 1194 | struct se_dif_v1_tuple *sdt; |
1185 | struct scatterlist *dsg, *psg = cmd->t_prot_sg; | 1195 | struct scatterlist *dsg = cmd->t_data_sg, *psg; |
1186 | sector_t sector = cmd->t_task_lba; | 1196 | sector_t sector = cmd->t_task_lba; |
1187 | void *daddr, *paddr; | 1197 | void *daddr, *paddr; |
1188 | int i, j, offset = 0; | 1198 | int i, j, offset = 0; |
1199 | unsigned int block_size = dev->dev_attrib.block_size; | ||
1189 | 1200 | ||
1190 | for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { | 1201 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { |
1191 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1192 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1202 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; |
1203 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1193 | 1204 | ||
1194 | for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { | 1205 | for (j = 0; j < psg->length; |
1206 | j += sizeof(struct se_dif_v1_tuple)) { | ||
1207 | __u16 crc; | ||
1208 | unsigned int avail; | ||
1209 | |||
1210 | if (offset >= dsg->length) { | ||
1211 | offset -= dsg->length; | ||
1212 | kunmap_atomic(daddr - dsg->offset); | ||
1213 | dsg = sg_next(dsg); | ||
1214 | if (!dsg) { | ||
1215 | kunmap_atomic(paddr - psg->offset); | ||
1216 | return; | ||
1217 | } | ||
1218 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1219 | } | ||
1195 | 1220 | ||
1196 | if (offset >= psg->length) { | 1221 | sdt = paddr + j; |
1197 | kunmap_atomic(paddr); | 1222 | avail = min(block_size, dsg->length - offset); |
1198 | psg = sg_next(psg); | 1223 | crc = crc_t10dif(daddr + offset, avail); |
1199 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1224 | if (avail < block_size) { |
1200 | offset = 0; | 1225 | kunmap_atomic(daddr - dsg->offset); |
1226 | dsg = sg_next(dsg); | ||
1227 | if (!dsg) { | ||
1228 | kunmap_atomic(paddr - psg->offset); | ||
1229 | return; | ||
1230 | } | ||
1231 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1232 | offset = block_size - avail; | ||
1233 | crc = crc_t10dif_update(crc, daddr, offset); | ||
1234 | } else { | ||
1235 | offset += block_size; | ||
1201 | } | 1236 | } |
1202 | 1237 | ||
1203 | sdt = paddr + offset; | 1238 | sdt->guard_tag = cpu_to_be16(crc); |
1204 | sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, | ||
1205 | dev->dev_attrib.block_size)); | ||
1206 | if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) | 1239 | if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) |
1207 | sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); | 1240 | sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); |
1208 | sdt->app_tag = 0; | 1241 | sdt->app_tag = 0; |
@@ -1215,26 +1248,23 @@ sbc_dif_generate(struct se_cmd *cmd) | |||
1215 | be32_to_cpu(sdt->ref_tag)); | 1248 | be32_to_cpu(sdt->ref_tag)); |
1216 | 1249 | ||
1217 | sector++; | 1250 | sector++; |
1218 | offset += sizeof(struct se_dif_v1_tuple); | ||
1219 | } | 1251 | } |
1220 | 1252 | ||
1221 | kunmap_atomic(paddr); | 1253 | kunmap_atomic(daddr - dsg->offset); |
1222 | kunmap_atomic(daddr); | 1254 | kunmap_atomic(paddr - psg->offset); |
1223 | } | 1255 | } |
1224 | } | 1256 | } |
1225 | 1257 | ||
1226 | static sense_reason_t | 1258 | static sense_reason_t |
1227 | sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, | 1259 | sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, |
1228 | const void *p, sector_t sector, unsigned int ei_lba) | 1260 | __u16 crc, sector_t sector, unsigned int ei_lba) |
1229 | { | 1261 | { |
1230 | struct se_device *dev = cmd->se_dev; | ||
1231 | int block_size = dev->dev_attrib.block_size; | ||
1232 | __be16 csum; | 1262 | __be16 csum; |
1233 | 1263 | ||
1234 | if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) | 1264 | if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) |
1235 | goto check_ref; | 1265 | goto check_ref; |
1236 | 1266 | ||
1237 | csum = cpu_to_be16(crc_t10dif(p, block_size)); | 1267 | csum = cpu_to_be16(crc); |
1238 | 1268 | ||
1239 | if (sdt->guard_tag != csum) { | 1269 | if (sdt->guard_tag != csum) { |
1240 | pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" | 1270 | pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" |
@@ -1266,9 +1296,8 @@ check_ref: | |||
1266 | return 0; | 1296 | return 0; |
1267 | } | 1297 | } |
1268 | 1298 | ||
1269 | static void | 1299 | void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, |
1270 | sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | 1300 | struct scatterlist *sg, int sg_off) |
1271 | struct scatterlist *sg, int sg_off) | ||
1272 | { | 1301 | { |
1273 | struct se_device *dev = cmd->se_dev; | 1302 | struct se_device *dev = cmd->se_dev; |
1274 | struct scatterlist *psg; | 1303 | struct scatterlist *psg; |
@@ -1300,100 +1329,54 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1300 | copied += len; | 1329 | copied += len; |
1301 | psg_len -= len; | 1330 | psg_len -= len; |
1302 | 1331 | ||
1332 | kunmap_atomic(addr - sg->offset - offset); | ||
1333 | |||
1303 | if (offset >= sg->length) { | 1334 | if (offset >= sg->length) { |
1304 | sg = sg_next(sg); | 1335 | sg = sg_next(sg); |
1305 | offset = 0; | 1336 | offset = 0; |
1306 | } | 1337 | } |
1307 | kunmap_atomic(addr); | ||
1308 | } | 1338 | } |
1309 | kunmap_atomic(paddr); | 1339 | kunmap_atomic(paddr - psg->offset); |
1310 | } | 1340 | } |
1311 | } | 1341 | } |
1342 | EXPORT_SYMBOL(sbc_dif_copy_prot); | ||
1312 | 1343 | ||
1313 | sense_reason_t | 1344 | sense_reason_t |
1314 | sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, | 1345 | sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, |
1315 | unsigned int ei_lba, struct scatterlist *sg, int sg_off) | 1346 | unsigned int ei_lba, struct scatterlist *psg, int psg_off) |
1316 | { | 1347 | { |
1317 | struct se_device *dev = cmd->se_dev; | 1348 | struct se_device *dev = cmd->se_dev; |
1318 | struct se_dif_v1_tuple *sdt; | 1349 | struct se_dif_v1_tuple *sdt; |
1319 | struct scatterlist *dsg, *psg = cmd->t_prot_sg; | 1350 | struct scatterlist *dsg = cmd->t_data_sg; |
1320 | sector_t sector = start; | 1351 | sector_t sector = start; |
1321 | void *daddr, *paddr; | 1352 | void *daddr, *paddr; |
1322 | int i, j, offset = 0; | 1353 | int i; |
1323 | sense_reason_t rc; | 1354 | sense_reason_t rc; |
1355 | int dsg_off = 0; | ||
1356 | unsigned int block_size = dev->dev_attrib.block_size; | ||
1324 | 1357 | ||
1325 | for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { | 1358 | for (; psg && sector < start + sectors; psg = sg_next(psg)) { |
1326 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1327 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1359 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; |
1328 | |||
1329 | for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { | ||
1330 | |||
1331 | if (offset >= psg->length) { | ||
1332 | kunmap_atomic(paddr); | ||
1333 | psg = sg_next(psg); | ||
1334 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | ||
1335 | offset = 0; | ||
1336 | } | ||
1337 | |||
1338 | sdt = paddr + offset; | ||
1339 | |||
1340 | pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" | ||
1341 | " app_tag: 0x%04x ref_tag: %u\n", | ||
1342 | (unsigned long long)sector, sdt->guard_tag, | ||
1343 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); | ||
1344 | |||
1345 | rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, | ||
1346 | ei_lba); | ||
1347 | if (rc) { | ||
1348 | kunmap_atomic(paddr); | ||
1349 | kunmap_atomic(daddr); | ||
1350 | cmd->bad_sector = sector; | ||
1351 | return rc; | ||
1352 | } | ||
1353 | |||
1354 | sector++; | ||
1355 | ei_lba++; | ||
1356 | offset += sizeof(struct se_dif_v1_tuple); | ||
1357 | } | ||
1358 | |||
1359 | kunmap_atomic(paddr); | ||
1360 | kunmap_atomic(daddr); | ||
1361 | } | ||
1362 | if (!sg) | ||
1363 | return 0; | ||
1364 | |||
1365 | sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); | ||
1366 | |||
1367 | return 0; | ||
1368 | } | ||
1369 | EXPORT_SYMBOL(sbc_dif_verify_write); | ||
1370 | |||
1371 | static sense_reason_t | ||
1372 | __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, | ||
1373 | unsigned int ei_lba, struct scatterlist *sg, int sg_off) | ||
1374 | { | ||
1375 | struct se_device *dev = cmd->se_dev; | ||
1376 | struct se_dif_v1_tuple *sdt; | ||
1377 | struct scatterlist *dsg, *psg = sg; | ||
1378 | sector_t sector = start; | ||
1379 | void *daddr, *paddr; | ||
1380 | int i, j, offset = sg_off; | ||
1381 | sense_reason_t rc; | ||
1382 | |||
1383 | for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { | ||
1384 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | 1360 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; |
1385 | paddr = kmap_atomic(sg_page(psg)) + sg->offset; | ||
1386 | |||
1387 | for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { | ||
1388 | 1361 | ||
1389 | if (offset >= psg->length) { | 1362 | for (i = psg_off; i < psg->length && |
1390 | kunmap_atomic(paddr); | 1363 | sector < start + sectors; |
1391 | psg = sg_next(psg); | 1364 | i += sizeof(struct se_dif_v1_tuple)) { |
1392 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1365 | __u16 crc; |
1393 | offset = 0; | 1366 | unsigned int avail; |
1367 | |||
1368 | if (dsg_off >= dsg->length) { | ||
1369 | dsg_off -= dsg->length; | ||
1370 | kunmap_atomic(daddr - dsg->offset); | ||
1371 | dsg = sg_next(dsg); | ||
1372 | if (!dsg) { | ||
1373 | kunmap_atomic(paddr - psg->offset); | ||
1374 | return 0; | ||
1375 | } | ||
1376 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1394 | } | 1377 | } |
1395 | 1378 | ||
1396 | sdt = paddr + offset; | 1379 | sdt = paddr + i; |
1397 | 1380 | ||
1398 | pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" | 1381 | pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" |
1399 | " app_tag: 0x%04x ref_tag: %u\n", | 1382 | " app_tag: 0x%04x ref_tag: %u\n", |
@@ -1401,53 +1384,43 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1401 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); | 1384 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); |
1402 | 1385 | ||
1403 | if (sdt->app_tag == cpu_to_be16(0xffff)) { | 1386 | if (sdt->app_tag == cpu_to_be16(0xffff)) { |
1404 | sector++; | 1387 | dsg_off += block_size; |
1405 | offset += sizeof(struct se_dif_v1_tuple); | 1388 | goto next; |
1406 | continue; | 1389 | } |
1390 | |||
1391 | avail = min(block_size, dsg->length - dsg_off); | ||
1392 | crc = crc_t10dif(daddr + dsg_off, avail); | ||
1393 | if (avail < block_size) { | ||
1394 | kunmap_atomic(daddr - dsg->offset); | ||
1395 | dsg = sg_next(dsg); | ||
1396 | if (!dsg) { | ||
1397 | kunmap_atomic(paddr - psg->offset); | ||
1398 | return 0; | ||
1399 | } | ||
1400 | daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; | ||
1401 | dsg_off = block_size - avail; | ||
1402 | crc = crc_t10dif_update(crc, daddr, dsg_off); | ||
1403 | } else { | ||
1404 | dsg_off += block_size; | ||
1407 | } | 1405 | } |
1408 | 1406 | ||
1409 | rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, | 1407 | rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba); |
1410 | ei_lba); | ||
1411 | if (rc) { | 1408 | if (rc) { |
1412 | kunmap_atomic(paddr); | 1409 | kunmap_atomic(daddr - dsg->offset); |
1413 | kunmap_atomic(daddr); | 1410 | kunmap_atomic(paddr - psg->offset); |
1414 | cmd->bad_sector = sector; | 1411 | cmd->bad_sector = sector; |
1415 | return rc; | 1412 | return rc; |
1416 | } | 1413 | } |
1417 | 1414 | next: | |
1418 | sector++; | 1415 | sector++; |
1419 | ei_lba++; | 1416 | ei_lba++; |
1420 | offset += sizeof(struct se_dif_v1_tuple); | ||
1421 | } | 1417 | } |
1422 | 1418 | ||
1423 | kunmap_atomic(paddr); | 1419 | psg_off = 0; |
1424 | kunmap_atomic(daddr); | 1420 | kunmap_atomic(daddr - dsg->offset); |
1421 | kunmap_atomic(paddr - psg->offset); | ||
1425 | } | 1422 | } |
1426 | 1423 | ||
1427 | return 0; | 1424 | return 0; |
1428 | } | 1425 | } |
1429 | 1426 | EXPORT_SYMBOL(sbc_dif_verify); | |
1430 | sense_reason_t | ||
1431 | sbc_dif_read_strip(struct se_cmd *cmd) | ||
1432 | { | ||
1433 | struct se_device *dev = cmd->se_dev; | ||
1434 | u32 sectors = cmd->prot_length / dev->prot_length; | ||
1435 | |||
1436 | return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, | ||
1437 | cmd->t_prot_sg, 0); | ||
1438 | } | ||
1439 | |||
1440 | sense_reason_t | ||
1441 | sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, | ||
1442 | unsigned int ei_lba, struct scatterlist *sg, int sg_off) | ||
1443 | { | ||
1444 | sense_reason_t rc; | ||
1445 | |||
1446 | rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); | ||
1447 | if (rc) | ||
1448 | return rc; | ||
1449 | |||
1450 | sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); | ||
1451 | return 0; | ||
1452 | } | ||
1453 | EXPORT_SYMBOL(sbc_dif_verify_read); | ||
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 52ea640274f4..b0744433315a 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -38,10 +38,9 @@ | |||
38 | #include "target_core_ua.h" | 38 | #include "target_core_ua.h" |
39 | #include "target_core_xcopy.h" | 39 | #include "target_core_xcopy.h" |
40 | 40 | ||
41 | static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) | 41 | static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) |
42 | { | 42 | { |
43 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 43 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
44 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
45 | 44 | ||
46 | /* | 45 | /* |
47 | * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. | 46 | * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. |
@@ -54,17 +53,11 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) | |||
54 | * | 53 | * |
55 | * See spc4r17 section 6.4.2 Table 135 | 54 | * See spc4r17 section 6.4.2 Table 135 |
56 | */ | 55 | */ |
57 | if (!port) | 56 | spin_lock(&lun->lun_tg_pt_gp_lock); |
58 | return; | 57 | tg_pt_gp = lun->lun_tg_pt_gp; |
59 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
60 | if (!tg_pt_gp_mem) | ||
61 | return; | ||
62 | |||
63 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
64 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
65 | if (tg_pt_gp) | 58 | if (tg_pt_gp) |
66 | buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; | 59 | buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; |
67 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 60 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
68 | } | 61 | } |
69 | 62 | ||
70 | sense_reason_t | 63 | sense_reason_t |
@@ -95,7 +88,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) | |||
95 | /* | 88 | /* |
96 | * Enable SCCS and TPGS fields for Emulated ALUA | 89 | * Enable SCCS and TPGS fields for Emulated ALUA |
97 | */ | 90 | */ |
98 | spc_fill_alua_data(lun->lun_sep, buf); | 91 | spc_fill_alua_data(lun, buf); |
99 | 92 | ||
100 | /* | 93 | /* |
101 | * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY | 94 | * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY |
@@ -182,11 +175,9 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | |||
182 | { | 175 | { |
183 | struct se_device *dev = cmd->se_dev; | 176 | struct se_device *dev = cmd->se_dev; |
184 | struct se_lun *lun = cmd->se_lun; | 177 | struct se_lun *lun = cmd->se_lun; |
185 | struct se_port *port = NULL; | ||
186 | struct se_portal_group *tpg = NULL; | 178 | struct se_portal_group *tpg = NULL; |
187 | struct t10_alua_lu_gp_member *lu_gp_mem; | 179 | struct t10_alua_lu_gp_member *lu_gp_mem; |
188 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 180 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
189 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
190 | unsigned char *prod = &dev->t10_wwn.model[0]; | 181 | unsigned char *prod = &dev->t10_wwn.model[0]; |
191 | u32 prod_len; | 182 | u32 prod_len; |
192 | u32 unit_serial_len, off = 0; | 183 | u32 unit_serial_len, off = 0; |
@@ -268,18 +259,15 @@ check_t10_vend_desc: | |||
268 | /* Header size for Designation descriptor */ | 259 | /* Header size for Designation descriptor */ |
269 | len += (id_len + 4); | 260 | len += (id_len + 4); |
270 | off += (id_len + 4); | 261 | off += (id_len + 4); |
271 | /* | 262 | |
272 | * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD | 263 | if (1) { |
273 | */ | ||
274 | port = lun->lun_sep; | ||
275 | if (port) { | ||
276 | struct t10_alua_lu_gp *lu_gp; | 264 | struct t10_alua_lu_gp *lu_gp; |
277 | u32 padding, scsi_name_len, scsi_target_len; | 265 | u32 padding, scsi_name_len, scsi_target_len; |
278 | u16 lu_gp_id = 0; | 266 | u16 lu_gp_id = 0; |
279 | u16 tg_pt_gp_id = 0; | 267 | u16 tg_pt_gp_id = 0; |
280 | u16 tpgt; | 268 | u16 tpgt; |
281 | 269 | ||
282 | tpg = port->sep_tpg; | 270 | tpg = lun->lun_tpg; |
283 | /* | 271 | /* |
284 | * Relative target port identifer, see spc4r17 | 272 | * Relative target port identifer, see spc4r17 |
285 | * section 7.7.3.7 | 273 | * section 7.7.3.7 |
@@ -287,8 +275,7 @@ check_t10_vend_desc: | |||
287 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | 275 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 |
288 | * section 7.5.1 Table 362 | 276 | * section 7.5.1 Table 362 |
289 | */ | 277 | */ |
290 | buf[off] = | 278 | buf[off] = tpg->proto_id << 4; |
291 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); | ||
292 | buf[off++] |= 0x1; /* CODE SET == Binary */ | 279 | buf[off++] |= 0x1; /* CODE SET == Binary */ |
293 | buf[off] = 0x80; /* Set PIV=1 */ | 280 | buf[off] = 0x80; /* Set PIV=1 */ |
294 | /* Set ASSOCIATION == target port: 01b */ | 281 | /* Set ASSOCIATION == target port: 01b */ |
@@ -300,8 +287,8 @@ check_t10_vend_desc: | |||
300 | /* Skip over Obsolete field in RTPI payload | 287 | /* Skip over Obsolete field in RTPI payload |
301 | * in Table 472 */ | 288 | * in Table 472 */ |
302 | off += 2; | 289 | off += 2; |
303 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | 290 | buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); |
304 | buf[off++] = (port->sep_rtpi & 0xff); | 291 | buf[off++] = (lun->lun_rtpi & 0xff); |
305 | len += 8; /* Header size + Designation descriptor */ | 292 | len += 8; /* Header size + Designation descriptor */ |
306 | /* | 293 | /* |
307 | * Target port group identifier, see spc4r17 | 294 | * Target port group identifier, see spc4r17 |
@@ -310,21 +297,16 @@ check_t10_vend_desc: | |||
310 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | 297 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 |
311 | * section 7.5.1 Table 362 | 298 | * section 7.5.1 Table 362 |
312 | */ | 299 | */ |
313 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 300 | spin_lock(&lun->lun_tg_pt_gp_lock); |
314 | if (!tg_pt_gp_mem) | 301 | tg_pt_gp = lun->lun_tg_pt_gp; |
315 | goto check_lu_gp; | ||
316 | |||
317 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
318 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
319 | if (!tg_pt_gp) { | 302 | if (!tg_pt_gp) { |
320 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 303 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
321 | goto check_lu_gp; | 304 | goto check_lu_gp; |
322 | } | 305 | } |
323 | tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; | 306 | tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; |
324 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 307 | spin_unlock(&lun->lun_tg_pt_gp_lock); |
325 | 308 | ||
326 | buf[off] = | 309 | buf[off] = tpg->proto_id << 4; |
327 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); | ||
328 | buf[off++] |= 0x1; /* CODE SET == Binary */ | 310 | buf[off++] |= 0x1; /* CODE SET == Binary */ |
329 | buf[off] = 0x80; /* Set PIV=1 */ | 311 | buf[off] = 0x80; /* Set PIV=1 */ |
330 | /* Set ASSOCIATION == target port: 01b */ | 312 | /* Set ASSOCIATION == target port: 01b */ |
@@ -372,8 +354,7 @@ check_lu_gp: | |||
372 | * section 7.5.1 Table 362 | 354 | * section 7.5.1 Table 362 |
373 | */ | 355 | */ |
374 | check_scsi_name: | 356 | check_scsi_name: |
375 | buf[off] = | 357 | buf[off] = tpg->proto_id << 4; |
376 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); | ||
377 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ | 358 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ |
378 | buf[off] = 0x80; /* Set PIV=1 */ | 359 | buf[off] = 0x80; /* Set PIV=1 */ |
379 | /* Set ASSOCIATION == target port: 01b */ | 360 | /* Set ASSOCIATION == target port: 01b */ |
@@ -413,8 +394,7 @@ check_scsi_name: | |||
413 | /* | 394 | /* |
414 | * Target device designator | 395 | * Target device designator |
415 | */ | 396 | */ |
416 | buf[off] = | 397 | buf[off] = tpg->proto_id << 4; |
417 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); | ||
418 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ | 398 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ |
419 | buf[off] = 0x80; /* Set PIV=1 */ | 399 | buf[off] = 0x80; /* Set PIV=1 */ |
420 | /* Set ASSOCIATION == target device: 10b */ | 400 | /* Set ASSOCIATION == target device: 10b */ |
@@ -482,7 +462,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
482 | buf[5] = 0x07; | 462 | buf[5] = 0x07; |
483 | 463 | ||
484 | /* If WriteCache emulation is enabled, set V_SUP */ | 464 | /* If WriteCache emulation is enabled, set V_SUP */ |
485 | if (se_dev_check_wce(dev)) | 465 | if (target_check_wce(dev)) |
486 | buf[6] = 0x01; | 466 | buf[6] = 0x01; |
487 | /* If an LBA map is present set R_SUP */ | 467 | /* If an LBA map is present set R_SUP */ |
488 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); | 468 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); |
@@ -699,7 +679,7 @@ static sense_reason_t | |||
699 | spc_emulate_inquiry(struct se_cmd *cmd) | 679 | spc_emulate_inquiry(struct se_cmd *cmd) |
700 | { | 680 | { |
701 | struct se_device *dev = cmd->se_dev; | 681 | struct se_device *dev = cmd->se_dev; |
702 | struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; | 682 | struct se_portal_group *tpg = cmd->se_lun->lun_tpg; |
703 | unsigned char *rbuf; | 683 | unsigned char *rbuf; |
704 | unsigned char *cdb = cmd->t_task_cdb; | 684 | unsigned char *cdb = cmd->t_task_cdb; |
705 | unsigned char *buf; | 685 | unsigned char *buf; |
@@ -713,7 +693,7 @@ spc_emulate_inquiry(struct se_cmd *cmd) | |||
713 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 693 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
714 | } | 694 | } |
715 | 695 | ||
716 | if (dev == tpg->tpg_virt_lun0.lun_se_dev) | 696 | if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev)) |
717 | buf[0] = 0x3f; /* Not connected */ | 697 | buf[0] = 0x3f; /* Not connected */ |
718 | else | 698 | else |
719 | buf[0] = dev->transport->get_device_type(dev); | 699 | buf[0] = dev->transport->get_device_type(dev); |
@@ -889,7 +869,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) | |||
889 | if (pc == 1) | 869 | if (pc == 1) |
890 | goto out; | 870 | goto out; |
891 | 871 | ||
892 | if (se_dev_check_wce(dev)) | 872 | if (target_check_wce(dev)) |
893 | p[2] = 0x04; /* Write Cache Enable */ | 873 | p[2] = 0x04; /* Write Cache Enable */ |
894 | p[12] = 0x20; /* Disabled Read Ahead */ | 874 | p[12] = 0x20; /* Disabled Read Ahead */ |
895 | 875 | ||
@@ -986,6 +966,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) | |||
986 | int length = 0; | 966 | int length = 0; |
987 | int ret; | 967 | int ret; |
988 | int i; | 968 | int i; |
969 | bool read_only = target_lun_is_rdonly(cmd);; | ||
989 | 970 | ||
990 | memset(buf, 0, SE_MODE_PAGE_BUF); | 971 | memset(buf, 0, SE_MODE_PAGE_BUF); |
991 | 972 | ||
@@ -996,13 +977,15 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) | |||
996 | length = ten ? 3 : 2; | 977 | length = ten ? 3 : 2; |
997 | 978 | ||
998 | /* DEVICE-SPECIFIC PARAMETER */ | 979 | /* DEVICE-SPECIFIC PARAMETER */ |
999 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 980 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only) |
1000 | (cmd->se_deve && | ||
1001 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | ||
1002 | spc_modesense_write_protect(&buf[length], type); | 981 | spc_modesense_write_protect(&buf[length], type); |
1003 | 982 | ||
1004 | if ((se_dev_check_wce(dev)) && | 983 | /* |
1005 | (dev->dev_attrib.emulate_fua_write > 0)) | 984 | * SBC only allows us to enable FUA and DPO together. Fortunately |
985 | * DPO is explicitly specified as a hint, so a noop is a perfectly | ||
986 | * valid implementation. | ||
987 | */ | ||
988 | if (target_check_fua(dev)) | ||
1006 | spc_modesense_dpofua(&buf[length], type); | 989 | spc_modesense_dpofua(&buf[length], type); |
1007 | 990 | ||
1008 | ++length; | 991 | ++length; |
@@ -1212,8 +1195,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1212 | { | 1195 | { |
1213 | struct se_dev_entry *deve; | 1196 | struct se_dev_entry *deve; |
1214 | struct se_session *sess = cmd->se_sess; | 1197 | struct se_session *sess = cmd->se_sess; |
1198 | struct se_node_acl *nacl; | ||
1215 | unsigned char *buf; | 1199 | unsigned char *buf; |
1216 | u32 lun_count = 0, offset = 8, i; | 1200 | u32 lun_count = 0, offset = 8; |
1217 | 1201 | ||
1218 | if (cmd->data_length < 16) { | 1202 | if (cmd->data_length < 16) { |
1219 | pr_warn("REPORT LUNS allocation length %u too small\n", | 1203 | pr_warn("REPORT LUNS allocation length %u too small\n", |
@@ -1235,12 +1219,10 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1235 | lun_count = 1; | 1219 | lun_count = 1; |
1236 | goto done; | 1220 | goto done; |
1237 | } | 1221 | } |
1222 | nacl = sess->se_node_acl; | ||
1238 | 1223 | ||
1239 | spin_lock_irq(&sess->se_node_acl->device_list_lock); | 1224 | rcu_read_lock(); |
1240 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 1225 | hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { |
1241 | deve = sess->se_node_acl->device_list[i]; | ||
1242 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
1243 | continue; | ||
1244 | /* | 1226 | /* |
1245 | * We determine the correct LUN LIST LENGTH even once we | 1227 | * We determine the correct LUN LIST LENGTH even once we |
1246 | * have reached the initial allocation length. | 1228 | * have reached the initial allocation length. |
@@ -1253,7 +1235,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1253 | int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); | 1235 | int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); |
1254 | offset += 8; | 1236 | offset += 8; |
1255 | } | 1237 | } |
1256 | spin_unlock_irq(&sess->se_node_acl->device_list_lock); | 1238 | rcu_read_unlock(); |
1257 | 1239 | ||
1258 | /* | 1240 | /* |
1259 | * See SPC3 r07, page 159. | 1241 | * See SPC3 r07, page 159. |
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 40f6c1378041..20ed5d2e151a 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <target/target_core_base.h> | 37 | #include <target/target_core_base.h> |
38 | #include <target/target_core_backend.h> | 38 | #include <target/target_core_backend.h> |
39 | #include <target/target_core_fabric.h> | 39 | #include <target/target_core_fabric.h> |
40 | #include <target/target_core_configfs.h> | ||
41 | #include <target/configfs_macros.h> | 40 | #include <target/configfs_macros.h> |
42 | 41 | ||
43 | #include "target_core_internal.h" | 42 | #include "target_core_internal.h" |
@@ -104,7 +103,7 @@ static ssize_t target_stat_scsi_dev_show_attr_ports( | |||
104 | struct se_device *dev = | 103 | struct se_device *dev = |
105 | container_of(sgrps, struct se_device, dev_stat_grps); | 104 | container_of(sgrps, struct se_device, dev_stat_grps); |
106 | 105 | ||
107 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); | 106 | return snprintf(page, PAGE_SIZE, "%u\n", dev->export_count); |
108 | } | 107 | } |
109 | DEV_STAT_SCSI_DEV_ATTR_RO(ports); | 108 | DEV_STAT_SCSI_DEV_ATTR_RO(ports); |
110 | 109 | ||
@@ -540,20 +539,14 @@ static ssize_t target_stat_scsi_port_show_attr_inst( | |||
540 | struct se_port_stat_grps *pgrps, char *page) | 539 | struct se_port_stat_grps *pgrps, char *page) |
541 | { | 540 | { |
542 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 541 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
543 | struct se_port *sep; | 542 | struct se_device *dev; |
544 | struct se_device *dev = lun->lun_se_dev; | 543 | ssize_t ret = -ENODEV; |
545 | struct se_hba *hba; | 544 | |
546 | ssize_t ret; | 545 | rcu_read_lock(); |
547 | 546 | dev = rcu_dereference(lun->lun_se_dev); | |
548 | spin_lock(&lun->lun_sep_lock); | 547 | if (dev) |
549 | sep = lun->lun_sep; | 548 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index); |
550 | if (!sep) { | 549 | rcu_read_unlock(); |
551 | spin_unlock(&lun->lun_sep_lock); | ||
552 | return -ENODEV; | ||
553 | } | ||
554 | hba = dev->se_hba; | ||
555 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
556 | spin_unlock(&lun->lun_sep_lock); | ||
557 | return ret; | 550 | return ret; |
558 | } | 551 | } |
559 | DEV_STAT_SCSI_PORT_ATTR_RO(inst); | 552 | DEV_STAT_SCSI_PORT_ATTR_RO(inst); |
@@ -562,18 +555,14 @@ static ssize_t target_stat_scsi_port_show_attr_dev( | |||
562 | struct se_port_stat_grps *pgrps, char *page) | 555 | struct se_port_stat_grps *pgrps, char *page) |
563 | { | 556 | { |
564 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 557 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
565 | struct se_port *sep; | 558 | struct se_device *dev; |
566 | struct se_device *dev = lun->lun_se_dev; | 559 | ssize_t ret = -ENODEV; |
567 | ssize_t ret; | 560 | |
568 | 561 | rcu_read_lock(); | |
569 | spin_lock(&lun->lun_sep_lock); | 562 | dev = rcu_dereference(lun->lun_se_dev); |
570 | sep = lun->lun_sep; | 563 | if (dev) |
571 | if (!sep) { | 564 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); |
572 | spin_unlock(&lun->lun_sep_lock); | 565 | rcu_read_unlock(); |
573 | return -ENODEV; | ||
574 | } | ||
575 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
576 | spin_unlock(&lun->lun_sep_lock); | ||
577 | return ret; | 566 | return ret; |
578 | } | 567 | } |
579 | DEV_STAT_SCSI_PORT_ATTR_RO(dev); | 568 | DEV_STAT_SCSI_PORT_ATTR_RO(dev); |
@@ -582,17 +571,14 @@ static ssize_t target_stat_scsi_port_show_attr_indx( | |||
582 | struct se_port_stat_grps *pgrps, char *page) | 571 | struct se_port_stat_grps *pgrps, char *page) |
583 | { | 572 | { |
584 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 573 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
585 | struct se_port *sep; | 574 | struct se_device *dev; |
586 | ssize_t ret; | 575 | ssize_t ret = -ENODEV; |
587 | 576 | ||
588 | spin_lock(&lun->lun_sep_lock); | 577 | rcu_read_lock(); |
589 | sep = lun->lun_sep; | 578 | dev = rcu_dereference(lun->lun_se_dev); |
590 | if (!sep) { | 579 | if (dev) |
591 | spin_unlock(&lun->lun_sep_lock); | 580 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi); |
592 | return -ENODEV; | 581 | rcu_read_unlock(); |
593 | } | ||
594 | ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); | ||
595 | spin_unlock(&lun->lun_sep_lock); | ||
596 | return ret; | 582 | return ret; |
597 | } | 583 | } |
598 | DEV_STAT_SCSI_PORT_ATTR_RO(indx); | 584 | DEV_STAT_SCSI_PORT_ATTR_RO(indx); |
@@ -601,21 +587,14 @@ static ssize_t target_stat_scsi_port_show_attr_role( | |||
601 | struct se_port_stat_grps *pgrps, char *page) | 587 | struct se_port_stat_grps *pgrps, char *page) |
602 | { | 588 | { |
603 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 589 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
604 | struct se_device *dev = lun->lun_se_dev; | 590 | struct se_device *dev; |
605 | struct se_port *sep; | 591 | ssize_t ret = -ENODEV; |
606 | ssize_t ret; | 592 | |
607 | 593 | rcu_read_lock(); | |
608 | if (!dev) | 594 | dev = rcu_dereference(lun->lun_se_dev); |
609 | return -ENODEV; | 595 | if (dev) |
610 | 596 | ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); | |
611 | spin_lock(&lun->lun_sep_lock); | 597 | rcu_read_unlock(); |
612 | sep = lun->lun_sep; | ||
613 | if (!sep) { | ||
614 | spin_unlock(&lun->lun_sep_lock); | ||
615 | return -ENODEV; | ||
616 | } | ||
617 | ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); | ||
618 | spin_unlock(&lun->lun_sep_lock); | ||
619 | return ret; | 598 | return ret; |
620 | } | 599 | } |
621 | DEV_STAT_SCSI_PORT_ATTR_RO(role); | 600 | DEV_STAT_SCSI_PORT_ATTR_RO(role); |
@@ -624,18 +603,16 @@ static ssize_t target_stat_scsi_port_show_attr_busy_count( | |||
624 | struct se_port_stat_grps *pgrps, char *page) | 603 | struct se_port_stat_grps *pgrps, char *page) |
625 | { | 604 | { |
626 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 605 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
627 | struct se_port *sep; | 606 | struct se_device *dev; |
628 | ssize_t ret; | 607 | ssize_t ret = -ENODEV; |
629 | 608 | ||
630 | spin_lock(&lun->lun_sep_lock); | 609 | rcu_read_lock(); |
631 | sep = lun->lun_sep; | 610 | dev = rcu_dereference(lun->lun_se_dev); |
632 | if (!sep) { | 611 | if (dev) { |
633 | spin_unlock(&lun->lun_sep_lock); | 612 | /* FIXME: scsiPortBusyStatuses */ |
634 | return -ENODEV; | 613 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); |
635 | } | 614 | } |
636 | /* FIXME: scsiPortBusyStatuses */ | 615 | rcu_read_unlock(); |
637 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
638 | spin_unlock(&lun->lun_sep_lock); | ||
639 | return ret; | 616 | return ret; |
640 | } | 617 | } |
641 | DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); | 618 | DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); |
@@ -683,20 +660,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_inst( | |||
683 | struct se_port_stat_grps *pgrps, char *page) | 660 | struct se_port_stat_grps *pgrps, char *page) |
684 | { | 661 | { |
685 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 662 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
686 | struct se_device *dev = lun->lun_se_dev; | 663 | struct se_device *dev; |
687 | struct se_port *sep; | 664 | ssize_t ret = -ENODEV; |
688 | struct se_hba *hba; | 665 | |
689 | ssize_t ret; | 666 | rcu_read_lock(); |
690 | 667 | dev = rcu_dereference(lun->lun_se_dev); | |
691 | spin_lock(&lun->lun_sep_lock); | 668 | if (dev) |
692 | sep = lun->lun_sep; | 669 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index); |
693 | if (!sep) { | 670 | rcu_read_unlock(); |
694 | spin_unlock(&lun->lun_sep_lock); | ||
695 | return -ENODEV; | ||
696 | } | ||
697 | hba = dev->se_hba; | ||
698 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
699 | spin_unlock(&lun->lun_sep_lock); | ||
700 | return ret; | 671 | return ret; |
701 | } | 672 | } |
702 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); | 673 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); |
@@ -705,18 +676,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_dev( | |||
705 | struct se_port_stat_grps *pgrps, char *page) | 676 | struct se_port_stat_grps *pgrps, char *page) |
706 | { | 677 | { |
707 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 678 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
708 | struct se_device *dev = lun->lun_se_dev; | 679 | struct se_device *dev; |
709 | struct se_port *sep; | 680 | ssize_t ret = -ENODEV; |
710 | ssize_t ret; | 681 | |
711 | 682 | rcu_read_lock(); | |
712 | spin_lock(&lun->lun_sep_lock); | 683 | dev = rcu_dereference(lun->lun_se_dev); |
713 | sep = lun->lun_sep; | 684 | if (dev) |
714 | if (!sep) { | 685 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); |
715 | spin_unlock(&lun->lun_sep_lock); | 686 | rcu_read_unlock(); |
716 | return -ENODEV; | ||
717 | } | ||
718 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
719 | spin_unlock(&lun->lun_sep_lock); | ||
720 | return ret; | 687 | return ret; |
721 | } | 688 | } |
722 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); | 689 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); |
@@ -725,17 +692,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_indx( | |||
725 | struct se_port_stat_grps *pgrps, char *page) | 692 | struct se_port_stat_grps *pgrps, char *page) |
726 | { | 693 | { |
727 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 694 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
728 | struct se_port *sep; | 695 | struct se_device *dev; |
729 | ssize_t ret; | 696 | ssize_t ret = -ENODEV; |
730 | 697 | ||
731 | spin_lock(&lun->lun_sep_lock); | 698 | rcu_read_lock(); |
732 | sep = lun->lun_sep; | 699 | dev = rcu_dereference(lun->lun_se_dev); |
733 | if (!sep) { | 700 | if (dev) |
734 | spin_unlock(&lun->lun_sep_lock); | 701 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi); |
735 | return -ENODEV; | 702 | rcu_read_unlock(); |
736 | } | ||
737 | ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); | ||
738 | spin_unlock(&lun->lun_sep_lock); | ||
739 | return ret; | 703 | return ret; |
740 | } | 704 | } |
741 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); | 705 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); |
@@ -744,21 +708,17 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name( | |||
744 | struct se_port_stat_grps *pgrps, char *page) | 708 | struct se_port_stat_grps *pgrps, char *page) |
745 | { | 709 | { |
746 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 710 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
747 | struct se_port *sep; | 711 | struct se_portal_group *tpg = lun->lun_tpg; |
748 | struct se_portal_group *tpg; | 712 | struct se_device *dev; |
749 | ssize_t ret; | 713 | ssize_t ret = -ENODEV; |
750 | 714 | ||
751 | spin_lock(&lun->lun_sep_lock); | 715 | rcu_read_lock(); |
752 | sep = lun->lun_sep; | 716 | dev = rcu_dereference(lun->lun_se_dev); |
753 | if (!sep) { | 717 | if (dev) |
754 | spin_unlock(&lun->lun_sep_lock); | 718 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", |
755 | return -ENODEV; | 719 | tpg->se_tpg_tfo->get_fabric_name(), |
756 | } | 720 | lun->lun_rtpi); |
757 | tpg = sep->sep_tpg; | 721 | rcu_read_unlock(); |
758 | |||
759 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", | ||
760 | tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); | ||
761 | spin_unlock(&lun->lun_sep_lock); | ||
762 | return ret; | 722 | return ret; |
763 | } | 723 | } |
764 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); | 724 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); |
@@ -767,22 +727,17 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( | |||
767 | struct se_port_stat_grps *pgrps, char *page) | 727 | struct se_port_stat_grps *pgrps, char *page) |
768 | { | 728 | { |
769 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 729 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
770 | struct se_port *sep; | 730 | struct se_portal_group *tpg = lun->lun_tpg; |
771 | struct se_portal_group *tpg; | 731 | struct se_device *dev; |
772 | ssize_t ret; | 732 | ssize_t ret = -ENODEV; |
773 | 733 | ||
774 | spin_lock(&lun->lun_sep_lock); | 734 | rcu_read_lock(); |
775 | sep = lun->lun_sep; | 735 | dev = rcu_dereference(lun->lun_se_dev); |
776 | if (!sep) { | 736 | if (dev) |
777 | spin_unlock(&lun->lun_sep_lock); | 737 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", |
778 | return -ENODEV; | 738 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", |
779 | } | 739 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
780 | tpg = sep->sep_tpg; | 740 | rcu_read_unlock(); |
781 | |||
782 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", | ||
783 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", | ||
784 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
785 | spin_unlock(&lun->lun_sep_lock); | ||
786 | return ret; | 741 | return ret; |
787 | } | 742 | } |
788 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); | 743 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); |
@@ -791,18 +746,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( | |||
791 | struct se_port_stat_grps *pgrps, char *page) | 746 | struct se_port_stat_grps *pgrps, char *page) |
792 | { | 747 | { |
793 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 748 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
794 | struct se_port *sep; | 749 | struct se_device *dev; |
795 | ssize_t ret; | 750 | ssize_t ret = -ENODEV; |
796 | 751 | ||
797 | spin_lock(&lun->lun_sep_lock); | 752 | rcu_read_lock(); |
798 | sep = lun->lun_sep; | 753 | dev = rcu_dereference(lun->lun_se_dev); |
799 | if (!sep) { | 754 | if (dev) |
800 | spin_unlock(&lun->lun_sep_lock); | 755 | ret = snprintf(page, PAGE_SIZE, "%lu\n", |
801 | return -ENODEV; | 756 | atomic_long_read(&lun->lun_stats.cmd_pdus)); |
802 | } | 757 | rcu_read_unlock(); |
803 | |||
804 | ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); | ||
805 | spin_unlock(&lun->lun_sep_lock); | ||
806 | return ret; | 758 | return ret; |
807 | } | 759 | } |
808 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); | 760 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); |
@@ -811,19 +763,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( | |||
811 | struct se_port_stat_grps *pgrps, char *page) | 763 | struct se_port_stat_grps *pgrps, char *page) |
812 | { | 764 | { |
813 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 765 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
814 | struct se_port *sep; | 766 | struct se_device *dev; |
815 | ssize_t ret; | 767 | ssize_t ret = -ENODEV; |
816 | 768 | ||
817 | spin_lock(&lun->lun_sep_lock); | 769 | rcu_read_lock(); |
818 | sep = lun->lun_sep; | 770 | dev = rcu_dereference(lun->lun_se_dev); |
819 | if (!sep) { | 771 | if (dev) |
820 | spin_unlock(&lun->lun_sep_lock); | 772 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
821 | return -ENODEV; | 773 | (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20)); |
822 | } | 774 | rcu_read_unlock(); |
823 | |||
824 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
825 | (u32)(sep->sep_stats.rx_data_octets >> 20)); | ||
826 | spin_unlock(&lun->lun_sep_lock); | ||
827 | return ret; | 775 | return ret; |
828 | } | 776 | } |
829 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); | 777 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); |
@@ -832,19 +780,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( | |||
832 | struct se_port_stat_grps *pgrps, char *page) | 780 | struct se_port_stat_grps *pgrps, char *page) |
833 | { | 781 | { |
834 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 782 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
835 | struct se_port *sep; | 783 | struct se_device *dev; |
836 | ssize_t ret; | 784 | ssize_t ret = -ENODEV; |
837 | 785 | ||
838 | spin_lock(&lun->lun_sep_lock); | 786 | rcu_read_lock(); |
839 | sep = lun->lun_sep; | 787 | dev = rcu_dereference(lun->lun_se_dev); |
840 | if (!sep) { | 788 | if (dev) |
841 | spin_unlock(&lun->lun_sep_lock); | 789 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
842 | return -ENODEV; | 790 | (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20)); |
843 | } | 791 | rcu_read_unlock(); |
844 | |||
845 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
846 | (u32)(sep->sep_stats.tx_data_octets >> 20)); | ||
847 | spin_unlock(&lun->lun_sep_lock); | ||
848 | return ret; | 792 | return ret; |
849 | } | 793 | } |
850 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); | 794 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); |
@@ -853,19 +797,16 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( | |||
853 | struct se_port_stat_grps *pgrps, char *page) | 797 | struct se_port_stat_grps *pgrps, char *page) |
854 | { | 798 | { |
855 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 799 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
856 | struct se_port *sep; | 800 | struct se_device *dev; |
857 | ssize_t ret; | 801 | ssize_t ret = -ENODEV; |
858 | 802 | ||
859 | spin_lock(&lun->lun_sep_lock); | 803 | rcu_read_lock(); |
860 | sep = lun->lun_sep; | 804 | dev = rcu_dereference(lun->lun_se_dev); |
861 | if (!sep) { | 805 | if (dev) { |
862 | spin_unlock(&lun->lun_sep_lock); | 806 | /* FIXME: scsiTgtPortHsInCommands */ |
863 | return -ENODEV; | 807 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); |
864 | } | 808 | } |
865 | 809 | rcu_read_unlock(); | |
866 | /* FIXME: scsiTgtPortHsInCommands */ | ||
867 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
868 | spin_unlock(&lun->lun_sep_lock); | ||
869 | return ret; | 810 | return ret; |
870 | } | 811 | } |
871 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); | 812 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); |
@@ -919,21 +860,14 @@ static ssize_t target_stat_scsi_transport_show_attr_inst( | |||
919 | struct se_port_stat_grps *pgrps, char *page) | 860 | struct se_port_stat_grps *pgrps, char *page) |
920 | { | 861 | { |
921 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 862 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
922 | struct se_device *dev = lun->lun_se_dev; | 863 | struct se_device *dev; |
923 | struct se_port *sep; | 864 | ssize_t ret = -ENODEV; |
924 | struct se_hba *hba; | 865 | |
925 | ssize_t ret; | 866 | rcu_read_lock(); |
926 | 867 | dev = rcu_dereference(lun->lun_se_dev); | |
927 | spin_lock(&lun->lun_sep_lock); | 868 | if (dev) |
928 | sep = lun->lun_sep; | 869 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index); |
929 | if (!sep) { | 870 | rcu_read_unlock(); |
930 | spin_unlock(&lun->lun_sep_lock); | ||
931 | return -ENODEV; | ||
932 | } | ||
933 | |||
934 | hba = dev->se_hba; | ||
935 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
936 | spin_unlock(&lun->lun_sep_lock); | ||
937 | return ret; | 871 | return ret; |
938 | } | 872 | } |
939 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); | 873 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); |
@@ -942,21 +876,18 @@ static ssize_t target_stat_scsi_transport_show_attr_device( | |||
942 | struct se_port_stat_grps *pgrps, char *page) | 876 | struct se_port_stat_grps *pgrps, char *page) |
943 | { | 877 | { |
944 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 878 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
945 | struct se_port *sep; | 879 | struct se_device *dev; |
946 | struct se_portal_group *tpg; | 880 | struct se_portal_group *tpg = lun->lun_tpg; |
947 | ssize_t ret; | 881 | ssize_t ret = -ENODEV; |
948 | 882 | ||
949 | spin_lock(&lun->lun_sep_lock); | 883 | rcu_read_lock(); |
950 | sep = lun->lun_sep; | 884 | dev = rcu_dereference(lun->lun_se_dev); |
951 | if (!sep) { | 885 | if (dev) { |
952 | spin_unlock(&lun->lun_sep_lock); | 886 | /* scsiTransportType */ |
953 | return -ENODEV; | 887 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", |
888 | tpg->se_tpg_tfo->get_fabric_name()); | ||
954 | } | 889 | } |
955 | tpg = sep->sep_tpg; | 890 | rcu_read_unlock(); |
956 | /* scsiTransportType */ | ||
957 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", | ||
958 | tpg->se_tpg_tfo->get_fabric_name()); | ||
959 | spin_unlock(&lun->lun_sep_lock); | ||
960 | return ret; | 891 | return ret; |
961 | } | 892 | } |
962 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); | 893 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); |
@@ -965,20 +896,16 @@ static ssize_t target_stat_scsi_transport_show_attr_indx( | |||
965 | struct se_port_stat_grps *pgrps, char *page) | 896 | struct se_port_stat_grps *pgrps, char *page) |
966 | { | 897 | { |
967 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 898 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
968 | struct se_port *sep; | 899 | struct se_device *dev; |
969 | struct se_portal_group *tpg; | 900 | struct se_portal_group *tpg = lun->lun_tpg; |
970 | ssize_t ret; | 901 | ssize_t ret = -ENODEV; |
971 | 902 | ||
972 | spin_lock(&lun->lun_sep_lock); | 903 | rcu_read_lock(); |
973 | sep = lun->lun_sep; | 904 | dev = rcu_dereference(lun->lun_se_dev); |
974 | if (!sep) { | 905 | if (dev) |
975 | spin_unlock(&lun->lun_sep_lock); | 906 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
976 | return -ENODEV; | 907 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
977 | } | 908 | rcu_read_unlock(); |
978 | tpg = sep->sep_tpg; | ||
979 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
980 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); | ||
981 | spin_unlock(&lun->lun_sep_lock); | ||
982 | return ret; | 909 | return ret; |
983 | } | 910 | } |
984 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); | 911 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); |
@@ -987,26 +914,22 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name( | |||
987 | struct se_port_stat_grps *pgrps, char *page) | 914 | struct se_port_stat_grps *pgrps, char *page) |
988 | { | 915 | { |
989 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | 916 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); |
990 | struct se_device *dev = lun->lun_se_dev; | 917 | struct se_device *dev; |
991 | struct se_port *sep; | 918 | struct se_portal_group *tpg = lun->lun_tpg; |
992 | struct se_portal_group *tpg; | ||
993 | struct t10_wwn *wwn; | 919 | struct t10_wwn *wwn; |
994 | ssize_t ret; | 920 | ssize_t ret = -ENODEV; |
995 | 921 | ||
996 | spin_lock(&lun->lun_sep_lock); | 922 | rcu_read_lock(); |
997 | sep = lun->lun_sep; | 923 | dev = rcu_dereference(lun->lun_se_dev); |
998 | if (!sep) { | 924 | if (dev) { |
999 | spin_unlock(&lun->lun_sep_lock); | 925 | wwn = &dev->t10_wwn; |
1000 | return -ENODEV; | 926 | /* scsiTransportDevName */ |
927 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", | ||
928 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), | ||
929 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : | ||
930 | wwn->vendor); | ||
1001 | } | 931 | } |
1002 | tpg = sep->sep_tpg; | 932 | rcu_read_unlock(); |
1003 | wwn = &dev->t10_wwn; | ||
1004 | /* scsiTransportDevName */ | ||
1005 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", | ||
1006 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), | ||
1007 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : | ||
1008 | wwn->vendor); | ||
1009 | spin_unlock(&lun->lun_sep_lock); | ||
1010 | return ret; | 933 | return ret; |
1011 | } | 934 | } |
1012 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); | 935 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); |
@@ -1082,17 +1005,17 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst( | |||
1082 | struct se_portal_group *tpg; | 1005 | struct se_portal_group *tpg; |
1083 | ssize_t ret; | 1006 | ssize_t ret; |
1084 | 1007 | ||
1085 | spin_lock_irq(&nacl->device_list_lock); | 1008 | rcu_read_lock(); |
1086 | deve = nacl->device_list[lacl->mapped_lun]; | 1009 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1087 | if (!deve->se_lun || !deve->se_lun_acl) { | 1010 | if (!deve) { |
1088 | spin_unlock_irq(&nacl->device_list_lock); | 1011 | rcu_read_unlock(); |
1089 | return -ENODEV; | 1012 | return -ENODEV; |
1090 | } | 1013 | } |
1091 | tpg = nacl->se_tpg; | 1014 | tpg = nacl->se_tpg; |
1092 | /* scsiInstIndex */ | 1015 | /* scsiInstIndex */ |
1093 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1016 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1094 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); | 1017 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1095 | spin_unlock_irq(&nacl->device_list_lock); | 1018 | rcu_read_unlock(); |
1096 | return ret; | 1019 | return ret; |
1097 | } | 1020 | } |
1098 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); | 1021 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); |
@@ -1107,16 +1030,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev( | |||
1107 | struct se_lun *lun; | 1030 | struct se_lun *lun; |
1108 | ssize_t ret; | 1031 | ssize_t ret; |
1109 | 1032 | ||
1110 | spin_lock_irq(&nacl->device_list_lock); | 1033 | rcu_read_lock(); |
1111 | deve = nacl->device_list[lacl->mapped_lun]; | 1034 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1112 | if (!deve->se_lun || !deve->se_lun_acl) { | 1035 | if (!deve) { |
1113 | spin_unlock_irq(&nacl->device_list_lock); | 1036 | rcu_read_unlock(); |
1114 | return -ENODEV; | 1037 | return -ENODEV; |
1115 | } | 1038 | } |
1116 | lun = deve->se_lun; | 1039 | lun = rcu_dereference(deve->se_lun); |
1117 | /* scsiDeviceIndex */ | 1040 | /* scsiDeviceIndex */ |
1118 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | 1041 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index); |
1119 | spin_unlock_irq(&nacl->device_list_lock); | 1042 | rcu_read_unlock(); |
1120 | return ret; | 1043 | return ret; |
1121 | } | 1044 | } |
1122 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); | 1045 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); |
@@ -1131,16 +1054,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port( | |||
1131 | struct se_portal_group *tpg; | 1054 | struct se_portal_group *tpg; |
1132 | ssize_t ret; | 1055 | ssize_t ret; |
1133 | 1056 | ||
1134 | spin_lock_irq(&nacl->device_list_lock); | 1057 | rcu_read_lock(); |
1135 | deve = nacl->device_list[lacl->mapped_lun]; | 1058 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1136 | if (!deve->se_lun || !deve->se_lun_acl) { | 1059 | if (!deve) { |
1137 | spin_unlock_irq(&nacl->device_list_lock); | 1060 | rcu_read_unlock(); |
1138 | return -ENODEV; | 1061 | return -ENODEV; |
1139 | } | 1062 | } |
1140 | tpg = nacl->se_tpg; | 1063 | tpg = nacl->se_tpg; |
1141 | /* scsiAuthIntrTgtPortIndex */ | 1064 | /* scsiAuthIntrTgtPortIndex */ |
1142 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 1065 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1143 | spin_unlock_irq(&nacl->device_list_lock); | 1066 | rcu_read_unlock(); |
1144 | return ret; | 1067 | return ret; |
1145 | } | 1068 | } |
1146 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); | 1069 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); |
@@ -1154,15 +1077,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_indx( | |||
1154 | struct se_dev_entry *deve; | 1077 | struct se_dev_entry *deve; |
1155 | ssize_t ret; | 1078 | ssize_t ret; |
1156 | 1079 | ||
1157 | spin_lock_irq(&nacl->device_list_lock); | 1080 | rcu_read_lock(); |
1158 | deve = nacl->device_list[lacl->mapped_lun]; | 1081 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1159 | if (!deve->se_lun || !deve->se_lun_acl) { | 1082 | if (!deve) { |
1160 | spin_unlock_irq(&nacl->device_list_lock); | 1083 | rcu_read_unlock(); |
1161 | return -ENODEV; | 1084 | return -ENODEV; |
1162 | } | 1085 | } |
1163 | /* scsiAuthIntrIndex */ | 1086 | /* scsiAuthIntrIndex */ |
1164 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); | 1087 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); |
1165 | spin_unlock_irq(&nacl->device_list_lock); | 1088 | rcu_read_unlock(); |
1166 | return ret; | 1089 | return ret; |
1167 | } | 1090 | } |
1168 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); | 1091 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); |
@@ -1176,15 +1099,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( | |||
1176 | struct se_dev_entry *deve; | 1099 | struct se_dev_entry *deve; |
1177 | ssize_t ret; | 1100 | ssize_t ret; |
1178 | 1101 | ||
1179 | spin_lock_irq(&nacl->device_list_lock); | 1102 | rcu_read_lock(); |
1180 | deve = nacl->device_list[lacl->mapped_lun]; | 1103 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1181 | if (!deve->se_lun || !deve->se_lun_acl) { | 1104 | if (!deve) { |
1182 | spin_unlock_irq(&nacl->device_list_lock); | 1105 | rcu_read_unlock(); |
1183 | return -ENODEV; | 1106 | return -ENODEV; |
1184 | } | 1107 | } |
1185 | /* scsiAuthIntrDevOrPort */ | 1108 | /* scsiAuthIntrDevOrPort */ |
1186 | ret = snprintf(page, PAGE_SIZE, "%u\n", 1); | 1109 | ret = snprintf(page, PAGE_SIZE, "%u\n", 1); |
1187 | spin_unlock_irq(&nacl->device_list_lock); | 1110 | rcu_read_unlock(); |
1188 | return ret; | 1111 | return ret; |
1189 | } | 1112 | } |
1190 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); | 1113 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); |
@@ -1198,15 +1121,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( | |||
1198 | struct se_dev_entry *deve; | 1121 | struct se_dev_entry *deve; |
1199 | ssize_t ret; | 1122 | ssize_t ret; |
1200 | 1123 | ||
1201 | spin_lock_irq(&nacl->device_list_lock); | 1124 | rcu_read_lock(); |
1202 | deve = nacl->device_list[lacl->mapped_lun]; | 1125 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1203 | if (!deve->se_lun || !deve->se_lun_acl) { | 1126 | if (!deve) { |
1204 | spin_unlock_irq(&nacl->device_list_lock); | 1127 | rcu_read_unlock(); |
1205 | return -ENODEV; | 1128 | return -ENODEV; |
1206 | } | 1129 | } |
1207 | /* scsiAuthIntrName */ | 1130 | /* scsiAuthIntrName */ |
1208 | ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); | 1131 | ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); |
1209 | spin_unlock_irq(&nacl->device_list_lock); | 1132 | rcu_read_unlock(); |
1210 | return ret; | 1133 | return ret; |
1211 | } | 1134 | } |
1212 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); | 1135 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); |
@@ -1220,15 +1143,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( | |||
1220 | struct se_dev_entry *deve; | 1143 | struct se_dev_entry *deve; |
1221 | ssize_t ret; | 1144 | ssize_t ret; |
1222 | 1145 | ||
1223 | spin_lock_irq(&nacl->device_list_lock); | 1146 | rcu_read_lock(); |
1224 | deve = nacl->device_list[lacl->mapped_lun]; | 1147 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1225 | if (!deve->se_lun || !deve->se_lun_acl) { | 1148 | if (!deve) { |
1226 | spin_unlock_irq(&nacl->device_list_lock); | 1149 | rcu_read_unlock(); |
1227 | return -ENODEV; | 1150 | return -ENODEV; |
1228 | } | 1151 | } |
1229 | /* FIXME: scsiAuthIntrLunMapIndex */ | 1152 | /* FIXME: scsiAuthIntrLunMapIndex */ |
1230 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | 1153 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); |
1231 | spin_unlock_irq(&nacl->device_list_lock); | 1154 | rcu_read_unlock(); |
1232 | return ret; | 1155 | return ret; |
1233 | } | 1156 | } |
1234 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); | 1157 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); |
@@ -1242,15 +1165,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( | |||
1242 | struct se_dev_entry *deve; | 1165 | struct se_dev_entry *deve; |
1243 | ssize_t ret; | 1166 | ssize_t ret; |
1244 | 1167 | ||
1245 | spin_lock_irq(&nacl->device_list_lock); | 1168 | rcu_read_lock(); |
1246 | deve = nacl->device_list[lacl->mapped_lun]; | 1169 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1247 | if (!deve->se_lun || !deve->se_lun_acl) { | 1170 | if (!deve) { |
1248 | spin_unlock_irq(&nacl->device_list_lock); | 1171 | rcu_read_unlock(); |
1249 | return -ENODEV; | 1172 | return -ENODEV; |
1250 | } | 1173 | } |
1251 | /* scsiAuthIntrAttachedTimes */ | 1174 | /* scsiAuthIntrAttachedTimes */ |
1252 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); | 1175 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); |
1253 | spin_unlock_irq(&nacl->device_list_lock); | 1176 | rcu_read_unlock(); |
1254 | return ret; | 1177 | return ret; |
1255 | } | 1178 | } |
1256 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); | 1179 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); |
@@ -1264,15 +1187,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( | |||
1264 | struct se_dev_entry *deve; | 1187 | struct se_dev_entry *deve; |
1265 | ssize_t ret; | 1188 | ssize_t ret; |
1266 | 1189 | ||
1267 | spin_lock_irq(&nacl->device_list_lock); | 1190 | rcu_read_lock(); |
1268 | deve = nacl->device_list[lacl->mapped_lun]; | 1191 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1269 | if (!deve->se_lun || !deve->se_lun_acl) { | 1192 | if (!deve) { |
1270 | spin_unlock_irq(&nacl->device_list_lock); | 1193 | rcu_read_unlock(); |
1271 | return -ENODEV; | 1194 | return -ENODEV; |
1272 | } | 1195 | } |
1273 | /* scsiAuthIntrOutCommands */ | 1196 | /* scsiAuthIntrOutCommands */ |
1274 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); | 1197 | ret = snprintf(page, PAGE_SIZE, "%lu\n", |
1275 | spin_unlock_irq(&nacl->device_list_lock); | 1198 | atomic_long_read(&deve->total_cmds)); |
1199 | rcu_read_unlock(); | ||
1276 | return ret; | 1200 | return ret; |
1277 | } | 1201 | } |
1278 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); | 1202 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); |
@@ -1286,15 +1210,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( | |||
1286 | struct se_dev_entry *deve; | 1210 | struct se_dev_entry *deve; |
1287 | ssize_t ret; | 1211 | ssize_t ret; |
1288 | 1212 | ||
1289 | spin_lock_irq(&nacl->device_list_lock); | 1213 | rcu_read_lock(); |
1290 | deve = nacl->device_list[lacl->mapped_lun]; | 1214 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1291 | if (!deve->se_lun || !deve->se_lun_acl) { | 1215 | if (!deve) { |
1292 | spin_unlock_irq(&nacl->device_list_lock); | 1216 | rcu_read_unlock(); |
1293 | return -ENODEV; | 1217 | return -ENODEV; |
1294 | } | 1218 | } |
1295 | /* scsiAuthIntrReadMegaBytes */ | 1219 | /* scsiAuthIntrReadMegaBytes */ |
1296 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); | 1220 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1297 | spin_unlock_irq(&nacl->device_list_lock); | 1221 | (u32)(atomic_long_read(&deve->read_bytes) >> 20)); |
1222 | rcu_read_unlock(); | ||
1298 | return ret; | 1223 | return ret; |
1299 | } | 1224 | } |
1300 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); | 1225 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); |
@@ -1308,15 +1233,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( | |||
1308 | struct se_dev_entry *deve; | 1233 | struct se_dev_entry *deve; |
1309 | ssize_t ret; | 1234 | ssize_t ret; |
1310 | 1235 | ||
1311 | spin_lock_irq(&nacl->device_list_lock); | 1236 | rcu_read_lock(); |
1312 | deve = nacl->device_list[lacl->mapped_lun]; | 1237 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1313 | if (!deve->se_lun || !deve->se_lun_acl) { | 1238 | if (!deve) { |
1314 | spin_unlock_irq(&nacl->device_list_lock); | 1239 | rcu_read_unlock(); |
1315 | return -ENODEV; | 1240 | return -ENODEV; |
1316 | } | 1241 | } |
1317 | /* scsiAuthIntrWrittenMegaBytes */ | 1242 | /* scsiAuthIntrWrittenMegaBytes */ |
1318 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); | 1243 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1319 | spin_unlock_irq(&nacl->device_list_lock); | 1244 | (u32)(atomic_long_read(&deve->write_bytes) >> 20)); |
1245 | rcu_read_unlock(); | ||
1320 | return ret; | 1246 | return ret; |
1321 | } | 1247 | } |
1322 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); | 1248 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); |
@@ -1330,15 +1256,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( | |||
1330 | struct se_dev_entry *deve; | 1256 | struct se_dev_entry *deve; |
1331 | ssize_t ret; | 1257 | ssize_t ret; |
1332 | 1258 | ||
1333 | spin_lock_irq(&nacl->device_list_lock); | 1259 | rcu_read_lock(); |
1334 | deve = nacl->device_list[lacl->mapped_lun]; | 1260 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1335 | if (!deve->se_lun || !deve->se_lun_acl) { | 1261 | if (!deve) { |
1336 | spin_unlock_irq(&nacl->device_list_lock); | 1262 | rcu_read_unlock(); |
1337 | return -ENODEV; | 1263 | return -ENODEV; |
1338 | } | 1264 | } |
1339 | /* FIXME: scsiAuthIntrHSOutCommands */ | 1265 | /* FIXME: scsiAuthIntrHSOutCommands */ |
1340 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | 1266 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); |
1341 | spin_unlock_irq(&nacl->device_list_lock); | 1267 | rcu_read_unlock(); |
1342 | return ret; | 1268 | return ret; |
1343 | } | 1269 | } |
1344 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); | 1270 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); |
@@ -1352,16 +1278,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( | |||
1352 | struct se_dev_entry *deve; | 1278 | struct se_dev_entry *deve; |
1353 | ssize_t ret; | 1279 | ssize_t ret; |
1354 | 1280 | ||
1355 | spin_lock_irq(&nacl->device_list_lock); | 1281 | rcu_read_lock(); |
1356 | deve = nacl->device_list[lacl->mapped_lun]; | 1282 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1357 | if (!deve->se_lun || !deve->se_lun_acl) { | 1283 | if (!deve) { |
1358 | spin_unlock_irq(&nacl->device_list_lock); | 1284 | rcu_read_unlock(); |
1359 | return -ENODEV; | 1285 | return -ENODEV; |
1360 | } | 1286 | } |
1361 | /* scsiAuthIntrLastCreation */ | 1287 | /* scsiAuthIntrLastCreation */ |
1362 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - | 1288 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - |
1363 | INITIAL_JIFFIES) * 100 / HZ)); | 1289 | INITIAL_JIFFIES) * 100 / HZ)); |
1364 | spin_unlock_irq(&nacl->device_list_lock); | 1290 | rcu_read_unlock(); |
1365 | return ret; | 1291 | return ret; |
1366 | } | 1292 | } |
1367 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); | 1293 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); |
@@ -1375,15 +1301,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( | |||
1375 | struct se_dev_entry *deve; | 1301 | struct se_dev_entry *deve; |
1376 | ssize_t ret; | 1302 | ssize_t ret; |
1377 | 1303 | ||
1378 | spin_lock_irq(&nacl->device_list_lock); | 1304 | rcu_read_lock(); |
1379 | deve = nacl->device_list[lacl->mapped_lun]; | 1305 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1380 | if (!deve->se_lun || !deve->se_lun_acl) { | 1306 | if (!deve) { |
1381 | spin_unlock_irq(&nacl->device_list_lock); | 1307 | rcu_read_unlock(); |
1382 | return -ENODEV; | 1308 | return -ENODEV; |
1383 | } | 1309 | } |
1384 | /* FIXME: scsiAuthIntrRowStatus */ | 1310 | /* FIXME: scsiAuthIntrRowStatus */ |
1385 | ret = snprintf(page, PAGE_SIZE, "Ready\n"); | 1311 | ret = snprintf(page, PAGE_SIZE, "Ready\n"); |
1386 | spin_unlock_irq(&nacl->device_list_lock); | 1312 | rcu_read_unlock(); |
1387 | return ret; | 1313 | return ret; |
1388 | } | 1314 | } |
1389 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); | 1315 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); |
@@ -1448,17 +1374,17 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( | |||
1448 | struct se_portal_group *tpg; | 1374 | struct se_portal_group *tpg; |
1449 | ssize_t ret; | 1375 | ssize_t ret; |
1450 | 1376 | ||
1451 | spin_lock_irq(&nacl->device_list_lock); | 1377 | rcu_read_lock(); |
1452 | deve = nacl->device_list[lacl->mapped_lun]; | 1378 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1453 | if (!deve->se_lun || !deve->se_lun_acl) { | 1379 | if (!deve) { |
1454 | spin_unlock_irq(&nacl->device_list_lock); | 1380 | rcu_read_unlock(); |
1455 | return -ENODEV; | 1381 | return -ENODEV; |
1456 | } | 1382 | } |
1457 | tpg = nacl->se_tpg; | 1383 | tpg = nacl->se_tpg; |
1458 | /* scsiInstIndex */ | 1384 | /* scsiInstIndex */ |
1459 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1385 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1460 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); | 1386 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1461 | spin_unlock_irq(&nacl->device_list_lock); | 1387 | rcu_read_unlock(); |
1462 | return ret; | 1388 | return ret; |
1463 | } | 1389 | } |
1464 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); | 1390 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); |
@@ -1473,16 +1399,16 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( | |||
1473 | struct se_lun *lun; | 1399 | struct se_lun *lun; |
1474 | ssize_t ret; | 1400 | ssize_t ret; |
1475 | 1401 | ||
1476 | spin_lock_irq(&nacl->device_list_lock); | 1402 | rcu_read_lock(); |
1477 | deve = nacl->device_list[lacl->mapped_lun]; | 1403 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1478 | if (!deve->se_lun || !deve->se_lun_acl) { | 1404 | if (!deve) { |
1479 | spin_unlock_irq(&nacl->device_list_lock); | 1405 | rcu_read_unlock(); |
1480 | return -ENODEV; | 1406 | return -ENODEV; |
1481 | } | 1407 | } |
1482 | lun = deve->se_lun; | 1408 | lun = rcu_dereference(deve->se_lun); |
1483 | /* scsiDeviceIndex */ | 1409 | /* scsiDeviceIndex */ |
1484 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | 1410 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index); |
1485 | spin_unlock_irq(&nacl->device_list_lock); | 1411 | rcu_read_unlock(); |
1486 | return ret; | 1412 | return ret; |
1487 | } | 1413 | } |
1488 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); | 1414 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); |
@@ -1497,16 +1423,16 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port( | |||
1497 | struct se_portal_group *tpg; | 1423 | struct se_portal_group *tpg; |
1498 | ssize_t ret; | 1424 | ssize_t ret; |
1499 | 1425 | ||
1500 | spin_lock_irq(&nacl->device_list_lock); | 1426 | rcu_read_lock(); |
1501 | deve = nacl->device_list[lacl->mapped_lun]; | 1427 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1502 | if (!deve->se_lun || !deve->se_lun_acl) { | 1428 | if (!deve) { |
1503 | spin_unlock_irq(&nacl->device_list_lock); | 1429 | rcu_read_unlock(); |
1504 | return -ENODEV; | 1430 | return -ENODEV; |
1505 | } | 1431 | } |
1506 | tpg = nacl->se_tpg; | 1432 | tpg = nacl->se_tpg; |
1507 | /* scsiPortIndex */ | 1433 | /* scsiPortIndex */ |
1508 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 1434 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1509 | spin_unlock_irq(&nacl->device_list_lock); | 1435 | rcu_read_unlock(); |
1510 | return ret; | 1436 | return ret; |
1511 | } | 1437 | } |
1512 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); | 1438 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); |
@@ -1546,15 +1472,15 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( | |||
1546 | struct se_dev_entry *deve; | 1472 | struct se_dev_entry *deve; |
1547 | ssize_t ret; | 1473 | ssize_t ret; |
1548 | 1474 | ||
1549 | spin_lock_irq(&nacl->device_list_lock); | 1475 | rcu_read_lock(); |
1550 | deve = nacl->device_list[lacl->mapped_lun]; | 1476 | deve = target_nacl_find_deve(nacl, lacl->mapped_lun); |
1551 | if (!deve->se_lun || !deve->se_lun_acl) { | 1477 | if (!deve) { |
1552 | spin_unlock_irq(&nacl->device_list_lock); | 1478 | rcu_read_unlock(); |
1553 | return -ENODEV; | 1479 | return -ENODEV; |
1554 | } | 1480 | } |
1555 | /* scsiAttIntrPortAuthIntrIdx */ | 1481 | /* scsiAttIntrPortAuthIntrIdx */ |
1556 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); | 1482 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); |
1557 | spin_unlock_irq(&nacl->device_list_lock); | 1483 | rcu_read_unlock(); |
1558 | return ret; | 1484 | return ret; |
1559 | } | 1485 | } |
1560 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); | 1486 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index a5bb0c46e57e..5b2820312310 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <target/target_core_base.h> | 31 | #include <target/target_core_base.h> |
32 | #include <target/target_core_backend.h> | 32 | #include <target/target_core_backend.h> |
33 | #include <target/target_core_fabric.h> | 33 | #include <target/target_core_fabric.h> |
34 | #include <target/target_core_configfs.h> | ||
35 | 34 | ||
36 | #include "target_core_internal.h" | 35 | #include "target_core_internal.h" |
37 | #include "target_core_alua.h" | 36 | #include "target_core_alua.h" |
@@ -115,7 +114,7 @@ void core_tmr_abort_task( | |||
115 | { | 114 | { |
116 | struct se_cmd *se_cmd; | 115 | struct se_cmd *se_cmd; |
117 | unsigned long flags; | 116 | unsigned long flags; |
118 | int ref_tag; | 117 | u64 ref_tag; |
119 | 118 | ||
120 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 119 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
121 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { | 120 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { |
@@ -127,16 +126,17 @@ void core_tmr_abort_task( | |||
127 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) | 126 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) |
128 | continue; | 127 | continue; |
129 | 128 | ||
130 | ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); | 129 | ref_tag = se_cmd->tag; |
131 | if (tmr->ref_task_tag != ref_tag) | 130 | if (tmr->ref_task_tag != ref_tag) |
132 | continue; | 131 | continue; |
133 | 132 | ||
134 | printk("ABORT_TASK: Found referenced %s task_tag: %u\n", | 133 | printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", |
135 | se_cmd->se_tfo->get_fabric_name(), ref_tag); | 134 | se_cmd->se_tfo->get_fabric_name(), ref_tag); |
136 | 135 | ||
137 | spin_lock(&se_cmd->t_state_lock); | 136 | spin_lock(&se_cmd->t_state_lock); |
138 | if (se_cmd->transport_state & CMD_T_COMPLETE) { | 137 | if (se_cmd->transport_state & CMD_T_COMPLETE) { |
139 | printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); | 138 | printk("ABORT_TASK: ref_tag: %llu already complete," |
139 | " skipping\n", ref_tag); | ||
140 | spin_unlock(&se_cmd->t_state_lock); | 140 | spin_unlock(&se_cmd->t_state_lock); |
141 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 141 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
142 | goto out; | 142 | goto out; |
@@ -151,18 +151,18 @@ void core_tmr_abort_task( | |||
151 | cancel_work_sync(&se_cmd->work); | 151 | cancel_work_sync(&se_cmd->work); |
152 | transport_wait_for_tasks(se_cmd); | 152 | transport_wait_for_tasks(se_cmd); |
153 | 153 | ||
154 | target_put_sess_cmd(se_sess, se_cmd); | 154 | target_put_sess_cmd(se_cmd); |
155 | transport_cmd_finish_abort(se_cmd, true); | 155 | transport_cmd_finish_abort(se_cmd, true); |
156 | 156 | ||
157 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" | 157 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" |
158 | " ref_tag: %d\n", ref_tag); | 158 | " ref_tag: %llu\n", ref_tag); |
159 | tmr->response = TMR_FUNCTION_COMPLETE; | 159 | tmr->response = TMR_FUNCTION_COMPLETE; |
160 | return; | 160 | return; |
161 | } | 161 | } |
162 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 162 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
163 | 163 | ||
164 | out: | 164 | out: |
165 | printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n", | 165 | printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", |
166 | tmr->ref_task_tag); | 166 | tmr->ref_task_tag); |
167 | tmr->response = TMR_TASK_DOES_NOT_EXIST; | 167 | tmr->response = TMR_TASK_DOES_NOT_EXIST; |
168 | } | 168 | } |
@@ -287,16 +287,16 @@ static void core_tmr_drain_state_list( | |||
287 | list_del(&cmd->state_list); | 287 | list_del(&cmd->state_list); |
288 | 288 | ||
289 | pr_debug("LUN_RESET: %s cmd: %p" | 289 | pr_debug("LUN_RESET: %s cmd: %p" |
290 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" | 290 | " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" |
291 | "cdb: 0x%02x\n", | 291 | "cdb: 0x%02x\n", |
292 | (preempt_and_abort_list) ? "Preempt" : "", cmd, | 292 | (preempt_and_abort_list) ? "Preempt" : "", cmd, |
293 | cmd->se_tfo->get_task_tag(cmd), 0, | 293 | cmd->tag, 0, |
294 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | 294 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, |
295 | cmd->t_task_cdb[0]); | 295 | cmd->t_task_cdb[0]); |
296 | pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" | 296 | pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx" |
297 | " -- CMD_T_ACTIVE: %d" | 297 | " -- CMD_T_ACTIVE: %d" |
298 | " CMD_T_STOP: %d CMD_T_SENT: %d\n", | 298 | " CMD_T_STOP: %d CMD_T_SENT: %d\n", |
299 | cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, | 299 | cmd->tag, cmd->pr_res_key, |
300 | (cmd->transport_state & CMD_T_ACTIVE) != 0, | 300 | (cmd->transport_state & CMD_T_ACTIVE) != 0, |
301 | (cmd->transport_state & CMD_T_STOP) != 0, | 301 | (cmd->transport_state & CMD_T_STOP) != 0, |
302 | (cmd->transport_state & CMD_T_SENT) != 0); | 302 | (cmd->transport_state & CMD_T_SENT) != 0); |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 84de757bd458..babde4ad841f 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <target/target_core_fabric.h> | 39 | #include <target/target_core_fabric.h> |
40 | 40 | ||
41 | #include "target_core_internal.h" | 41 | #include "target_core_internal.h" |
42 | #include "target_core_alua.h" | ||
42 | #include "target_core_pr.h" | 43 | #include "target_core_pr.h" |
43 | 44 | ||
44 | extern struct se_device *g_lun0_dev; | 45 | extern struct se_device *g_lun0_dev; |
@@ -46,45 +47,9 @@ extern struct se_device *g_lun0_dev; | |||
46 | static DEFINE_SPINLOCK(tpg_lock); | 47 | static DEFINE_SPINLOCK(tpg_lock); |
47 | static LIST_HEAD(tpg_list); | 48 | static LIST_HEAD(tpg_list); |
48 | 49 | ||
49 | /* core_clear_initiator_node_from_tpg(): | ||
50 | * | ||
51 | * | ||
52 | */ | ||
53 | static void core_clear_initiator_node_from_tpg( | ||
54 | struct se_node_acl *nacl, | ||
55 | struct se_portal_group *tpg) | ||
56 | { | ||
57 | int i; | ||
58 | struct se_dev_entry *deve; | ||
59 | struct se_lun *lun; | ||
60 | |||
61 | spin_lock_irq(&nacl->device_list_lock); | ||
62 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
63 | deve = nacl->device_list[i]; | ||
64 | |||
65 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
66 | continue; | ||
67 | |||
68 | if (!deve->se_lun) { | ||
69 | pr_err("%s device entries device pointer is" | ||
70 | " NULL, but Initiator has access.\n", | ||
71 | tpg->se_tpg_tfo->get_fabric_name()); | ||
72 | continue; | ||
73 | } | ||
74 | |||
75 | lun = deve->se_lun; | ||
76 | spin_unlock_irq(&nacl->device_list_lock); | ||
77 | core_disable_device_list_for_node(lun, NULL, deve->mapped_lun, | ||
78 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); | ||
79 | |||
80 | spin_lock_irq(&nacl->device_list_lock); | ||
81 | } | ||
82 | spin_unlock_irq(&nacl->device_list_lock); | ||
83 | } | ||
84 | |||
85 | /* __core_tpg_get_initiator_node_acl(): | 50 | /* __core_tpg_get_initiator_node_acl(): |
86 | * | 51 | * |
87 | * spin_lock_bh(&tpg->acl_node_lock); must be held when calling | 52 | * mutex_lock(&tpg->acl_node_mutex); must be held when calling |
88 | */ | 53 | */ |
89 | struct se_node_acl *__core_tpg_get_initiator_node_acl( | 54 | struct se_node_acl *__core_tpg_get_initiator_node_acl( |
90 | struct se_portal_group *tpg, | 55 | struct se_portal_group *tpg, |
@@ -110,9 +75,9 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( | |||
110 | { | 75 | { |
111 | struct se_node_acl *acl; | 76 | struct se_node_acl *acl; |
112 | 77 | ||
113 | spin_lock_irq(&tpg->acl_node_lock); | 78 | mutex_lock(&tpg->acl_node_mutex); |
114 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 79 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
115 | spin_unlock_irq(&tpg->acl_node_lock); | 80 | mutex_unlock(&tpg->acl_node_mutex); |
116 | 81 | ||
117 | return acl; | 82 | return acl; |
118 | } | 83 | } |
@@ -124,22 +89,20 @@ EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); | |||
124 | */ | 89 | */ |
125 | void core_tpg_add_node_to_devs( | 90 | void core_tpg_add_node_to_devs( |
126 | struct se_node_acl *acl, | 91 | struct se_node_acl *acl, |
127 | struct se_portal_group *tpg) | 92 | struct se_portal_group *tpg, |
93 | struct se_lun *lun_orig) | ||
128 | { | 94 | { |
129 | int i = 0; | ||
130 | u32 lun_access = 0; | 95 | u32 lun_access = 0; |
131 | struct se_lun *lun; | 96 | struct se_lun *lun; |
132 | struct se_device *dev; | 97 | struct se_device *dev; |
133 | 98 | ||
134 | spin_lock(&tpg->tpg_lun_lock); | 99 | mutex_lock(&tpg->tpg_lun_mutex); |
135 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 100 | hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) { |
136 | lun = tpg->tpg_lun_list[i]; | 101 | if (lun_orig && lun != lun_orig) |
137 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) | ||
138 | continue; | 102 | continue; |
139 | 103 | ||
140 | spin_unlock(&tpg->tpg_lun_lock); | 104 | dev = rcu_dereference_check(lun->lun_se_dev, |
141 | 105 | lockdep_is_held(&tpg->tpg_lun_mutex)); | |
142 | dev = lun->lun_se_dev; | ||
143 | /* | 106 | /* |
144 | * By default in LIO-Target $FABRIC_MOD, | 107 | * By default in LIO-Target $FABRIC_MOD, |
145 | * demo_mode_write_protect is ON, or READ_ONLY; | 108 | * demo_mode_write_protect is ON, or READ_ONLY; |
@@ -157,7 +120,7 @@ void core_tpg_add_node_to_devs( | |||
157 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | 120 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; |
158 | } | 121 | } |
159 | 122 | ||
160 | pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" | 123 | pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" |
161 | " access for LUN in Demo Mode\n", | 124 | " access for LUN in Demo Mode\n", |
162 | tpg->se_tpg_tfo->get_fabric_name(), | 125 | tpg->se_tpg_tfo->get_fabric_name(), |
163 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, | 126 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
@@ -165,7 +128,7 @@ void core_tpg_add_node_to_devs( | |||
165 | "READ-WRITE" : "READ-ONLY"); | 128 | "READ-WRITE" : "READ-ONLY"); |
166 | 129 | ||
167 | core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, | 130 | core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, |
168 | lun_access, acl, tpg); | 131 | lun_access, acl, tpg); |
169 | /* | 132 | /* |
170 | * Check to see if there are any existing persistent reservation | 133 | * Check to see if there are any existing persistent reservation |
171 | * APTPL pre-registrations that need to be enabled for this dynamic | 134 | * APTPL pre-registrations that need to be enabled for this dynamic |
@@ -173,9 +136,8 @@ void core_tpg_add_node_to_devs( | |||
173 | */ | 136 | */ |
174 | core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, | 137 | core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, |
175 | lun->unpacked_lun); | 138 | lun->unpacked_lun); |
176 | spin_lock(&tpg->tpg_lun_lock); | ||
177 | } | 139 | } |
178 | spin_unlock(&tpg->tpg_lun_lock); | 140 | mutex_unlock(&tpg->tpg_lun_mutex); |
179 | } | 141 | } |
180 | 142 | ||
181 | /* core_set_queue_depth_for_node(): | 143 | /* core_set_queue_depth_for_node(): |
@@ -196,67 +158,63 @@ static int core_set_queue_depth_for_node( | |||
196 | return 0; | 158 | return 0; |
197 | } | 159 | } |
198 | 160 | ||
199 | void array_free(void *array, int n) | 161 | static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, |
162 | const unsigned char *initiatorname) | ||
200 | { | 163 | { |
201 | void **a = array; | 164 | struct se_node_acl *acl; |
202 | int i; | ||
203 | 165 | ||
204 | for (i = 0; i < n; i++) | 166 | acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), |
205 | kfree(a[i]); | 167 | GFP_KERNEL); |
206 | kfree(a); | 168 | if (!acl) |
207 | } | 169 | return NULL; |
208 | 170 | ||
209 | static void *array_zalloc(int n, size_t size, gfp_t flags) | 171 | INIT_LIST_HEAD(&acl->acl_list); |
210 | { | 172 | INIT_LIST_HEAD(&acl->acl_sess_list); |
211 | void **a; | 173 | INIT_HLIST_HEAD(&acl->lun_entry_hlist); |
212 | int i; | 174 | kref_init(&acl->acl_kref); |
175 | init_completion(&acl->acl_free_comp); | ||
176 | spin_lock_init(&acl->nacl_sess_lock); | ||
177 | mutex_init(&acl->lun_entry_mutex); | ||
178 | atomic_set(&acl->acl_pr_ref_count, 0); | ||
179 | if (tpg->se_tpg_tfo->tpg_get_default_depth) | ||
180 | acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); | ||
181 | else | ||
182 | acl->queue_depth = 1; | ||
183 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | ||
184 | acl->se_tpg = tpg; | ||
185 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | ||
213 | 186 | ||
214 | a = kzalloc(n * sizeof(void*), flags); | 187 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
215 | if (!a) | 188 | |
216 | return NULL; | 189 | if (core_set_queue_depth_for_node(tpg, acl) < 0) |
217 | for (i = 0; i < n; i++) { | 190 | goto out_free_acl; |
218 | a[i] = kzalloc(size, flags); | 191 | |
219 | if (!a[i]) { | 192 | return acl; |
220 | array_free(a, n); | 193 | |
221 | return NULL; | 194 | out_free_acl: |
222 | } | 195 | kfree(acl); |
223 | } | 196 | return NULL; |
224 | return a; | ||
225 | } | 197 | } |
226 | 198 | ||
227 | /* core_create_device_list_for_node(): | 199 | static void target_add_node_acl(struct se_node_acl *acl) |
228 | * | ||
229 | * | ||
230 | */ | ||
231 | static int core_create_device_list_for_node(struct se_node_acl *nacl) | ||
232 | { | 200 | { |
233 | struct se_dev_entry *deve; | 201 | struct se_portal_group *tpg = acl->se_tpg; |
234 | int i; | ||
235 | |||
236 | nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG, | ||
237 | sizeof(struct se_dev_entry), GFP_KERNEL); | ||
238 | if (!nacl->device_list) { | ||
239 | pr_err("Unable to allocate memory for" | ||
240 | " struct se_node_acl->device_list\n"); | ||
241 | return -ENOMEM; | ||
242 | } | ||
243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
244 | deve = nacl->device_list[i]; | ||
245 | |||
246 | atomic_set(&deve->ua_count, 0); | ||
247 | atomic_set(&deve->pr_ref_count, 0); | ||
248 | spin_lock_init(&deve->ua_lock); | ||
249 | INIT_LIST_HEAD(&deve->alua_port_list); | ||
250 | INIT_LIST_HEAD(&deve->ua_list); | ||
251 | } | ||
252 | 202 | ||
253 | return 0; | 203 | mutex_lock(&tpg->acl_node_mutex); |
204 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | ||
205 | tpg->num_node_acls++; | ||
206 | mutex_unlock(&tpg->acl_node_mutex); | ||
207 | |||
208 | pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" | ||
209 | " Initiator Node: %s\n", | ||
210 | tpg->se_tpg_tfo->get_fabric_name(), | ||
211 | tpg->se_tpg_tfo->tpg_get_tag(tpg), | ||
212 | acl->dynamic_node_acl ? "DYNAMIC" : "", | ||
213 | acl->queue_depth, | ||
214 | tpg->se_tpg_tfo->get_fabric_name(), | ||
215 | acl->initiatorname); | ||
254 | } | 216 | } |
255 | 217 | ||
256 | /* core_tpg_check_initiator_node_acl() | ||
257 | * | ||
258 | * | ||
259 | */ | ||
260 | struct se_node_acl *core_tpg_check_initiator_node_acl( | 218 | struct se_node_acl *core_tpg_check_initiator_node_acl( |
261 | struct se_portal_group *tpg, | 219 | struct se_portal_group *tpg, |
262 | unsigned char *initiatorname) | 220 | unsigned char *initiatorname) |
@@ -270,35 +228,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
270 | if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) | 228 | if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) |
271 | return NULL; | 229 | return NULL; |
272 | 230 | ||
273 | acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); | 231 | acl = target_alloc_node_acl(tpg, initiatorname); |
274 | if (!acl) | 232 | if (!acl) |
275 | return NULL; | 233 | return NULL; |
276 | |||
277 | INIT_LIST_HEAD(&acl->acl_list); | ||
278 | INIT_LIST_HEAD(&acl->acl_sess_list); | ||
279 | kref_init(&acl->acl_kref); | ||
280 | init_completion(&acl->acl_free_comp); | ||
281 | spin_lock_init(&acl->device_list_lock); | ||
282 | spin_lock_init(&acl->nacl_sess_lock); | ||
283 | atomic_set(&acl->acl_pr_ref_count, 0); | ||
284 | acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); | ||
285 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | ||
286 | acl->se_tpg = tpg; | ||
287 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | ||
288 | acl->dynamic_node_acl = 1; | 234 | acl->dynamic_node_acl = 1; |
289 | 235 | ||
290 | tpg->se_tpg_tfo->set_default_node_attributes(acl); | ||
291 | |||
292 | if (core_create_device_list_for_node(acl) < 0) { | ||
293 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); | ||
294 | return NULL; | ||
295 | } | ||
296 | |||
297 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | ||
298 | core_free_device_list_for_node(acl, tpg); | ||
299 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); | ||
300 | return NULL; | ||
301 | } | ||
302 | /* | 236 | /* |
303 | * Here we only create demo-mode MappedLUNs from the active | 237 | * Here we only create demo-mode MappedLUNs from the active |
304 | * TPG LUNs if the fabric is not explicitly asking for | 238 | * TPG LUNs if the fabric is not explicitly asking for |
@@ -306,18 +240,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
306 | */ | 240 | */ |
307 | if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || | 241 | if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || |
308 | (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) | 242 | (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) |
309 | core_tpg_add_node_to_devs(acl, tpg); | 243 | core_tpg_add_node_to_devs(acl, tpg, NULL); |
310 | |||
311 | spin_lock_irq(&tpg->acl_node_lock); | ||
312 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | ||
313 | tpg->num_node_acls++; | ||
314 | spin_unlock_irq(&tpg->acl_node_lock); | ||
315 | |||
316 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" | ||
317 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), | ||
318 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, | ||
319 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); | ||
320 | 244 | ||
245 | target_add_node_acl(acl); | ||
321 | return acl; | 246 | return acl; |
322 | } | 247 | } |
323 | EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); | 248 | EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); |
@@ -328,40 +253,13 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) | |||
328 | cpu_relax(); | 253 | cpu_relax(); |
329 | } | 254 | } |
330 | 255 | ||
331 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) | ||
332 | { | ||
333 | int i; | ||
334 | struct se_lun *lun; | ||
335 | |||
336 | spin_lock(&tpg->tpg_lun_lock); | ||
337 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
338 | lun = tpg->tpg_lun_list[i]; | ||
339 | |||
340 | if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || | ||
341 | (lun->lun_se_dev == NULL)) | ||
342 | continue; | ||
343 | |||
344 | spin_unlock(&tpg->tpg_lun_lock); | ||
345 | core_dev_del_lun(tpg, lun); | ||
346 | spin_lock(&tpg->tpg_lun_lock); | ||
347 | } | ||
348 | spin_unlock(&tpg->tpg_lun_lock); | ||
349 | } | ||
350 | EXPORT_SYMBOL(core_tpg_clear_object_luns); | ||
351 | |||
352 | /* core_tpg_add_initiator_node_acl(): | ||
353 | * | ||
354 | * | ||
355 | */ | ||
356 | struct se_node_acl *core_tpg_add_initiator_node_acl( | 256 | struct se_node_acl *core_tpg_add_initiator_node_acl( |
357 | struct se_portal_group *tpg, | 257 | struct se_portal_group *tpg, |
358 | struct se_node_acl *se_nacl, | 258 | const char *initiatorname) |
359 | const char *initiatorname, | ||
360 | u32 queue_depth) | ||
361 | { | 259 | { |
362 | struct se_node_acl *acl = NULL; | 260 | struct se_node_acl *acl; |
363 | 261 | ||
364 | spin_lock_irq(&tpg->acl_node_lock); | 262 | mutex_lock(&tpg->acl_node_mutex); |
365 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 263 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
366 | if (acl) { | 264 | if (acl) { |
367 | if (acl->dynamic_node_acl) { | 265 | if (acl->dynamic_node_acl) { |
@@ -369,99 +267,42 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
369 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" | 267 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" |
370 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), | 268 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
371 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); | 269 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); |
372 | spin_unlock_irq(&tpg->acl_node_lock); | 270 | mutex_unlock(&tpg->acl_node_mutex); |
373 | /* | 271 | return acl; |
374 | * Release the locally allocated struct se_node_acl | ||
375 | * because * core_tpg_add_initiator_node_acl() returned | ||
376 | * a pointer to an existing demo mode node ACL. | ||
377 | */ | ||
378 | if (se_nacl) | ||
379 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, | ||
380 | se_nacl); | ||
381 | goto done; | ||
382 | } | 272 | } |
383 | 273 | ||
384 | pr_err("ACL entry for %s Initiator" | 274 | pr_err("ACL entry for %s Initiator" |
385 | " Node %s already exists for TPG %u, ignoring" | 275 | " Node %s already exists for TPG %u, ignoring" |
386 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), | 276 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
387 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 277 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
388 | spin_unlock_irq(&tpg->acl_node_lock); | 278 | mutex_unlock(&tpg->acl_node_mutex); |
389 | return ERR_PTR(-EEXIST); | 279 | return ERR_PTR(-EEXIST); |
390 | } | 280 | } |
391 | spin_unlock_irq(&tpg->acl_node_lock); | 281 | mutex_unlock(&tpg->acl_node_mutex); |
392 | |||
393 | if (!se_nacl) { | ||
394 | pr_err("struct se_node_acl pointer is NULL\n"); | ||
395 | return ERR_PTR(-EINVAL); | ||
396 | } | ||
397 | /* | ||
398 | * For v4.x logic the se_node_acl_s is hanging off a fabric | ||
399 | * dependent structure allocated via | ||
400 | * struct target_core_fabric_ops->fabric_make_nodeacl() | ||
401 | */ | ||
402 | acl = se_nacl; | ||
403 | 282 | ||
404 | INIT_LIST_HEAD(&acl->acl_list); | 283 | acl = target_alloc_node_acl(tpg, initiatorname); |
405 | INIT_LIST_HEAD(&acl->acl_sess_list); | 284 | if (!acl) |
406 | kref_init(&acl->acl_kref); | ||
407 | init_completion(&acl->acl_free_comp); | ||
408 | spin_lock_init(&acl->device_list_lock); | ||
409 | spin_lock_init(&acl->nacl_sess_lock); | ||
410 | atomic_set(&acl->acl_pr_ref_count, 0); | ||
411 | acl->queue_depth = queue_depth; | ||
412 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | ||
413 | acl->se_tpg = tpg; | ||
414 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | ||
415 | |||
416 | tpg->se_tpg_tfo->set_default_node_attributes(acl); | ||
417 | |||
418 | if (core_create_device_list_for_node(acl) < 0) { | ||
419 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); | ||
420 | return ERR_PTR(-ENOMEM); | 285 | return ERR_PTR(-ENOMEM); |
421 | } | ||
422 | |||
423 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | ||
424 | core_free_device_list_for_node(acl, tpg); | ||
425 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); | ||
426 | return ERR_PTR(-EINVAL); | ||
427 | } | ||
428 | |||
429 | spin_lock_irq(&tpg->acl_node_lock); | ||
430 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | ||
431 | tpg->num_node_acls++; | ||
432 | spin_unlock_irq(&tpg->acl_node_lock); | ||
433 | |||
434 | done: | ||
435 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" | ||
436 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), | ||
437 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, | ||
438 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); | ||
439 | 286 | ||
287 | target_add_node_acl(acl); | ||
440 | return acl; | 288 | return acl; |
441 | } | 289 | } |
442 | EXPORT_SYMBOL(core_tpg_add_initiator_node_acl); | ||
443 | 290 | ||
444 | /* core_tpg_del_initiator_node_acl(): | 291 | void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) |
445 | * | ||
446 | * | ||
447 | */ | ||
448 | int core_tpg_del_initiator_node_acl( | ||
449 | struct se_portal_group *tpg, | ||
450 | struct se_node_acl *acl, | ||
451 | int force) | ||
452 | { | 292 | { |
293 | struct se_portal_group *tpg = acl->se_tpg; | ||
453 | LIST_HEAD(sess_list); | 294 | LIST_HEAD(sess_list); |
454 | struct se_session *sess, *sess_tmp; | 295 | struct se_session *sess, *sess_tmp; |
455 | unsigned long flags; | 296 | unsigned long flags; |
456 | int rc; | 297 | int rc; |
457 | 298 | ||
458 | spin_lock_irq(&tpg->acl_node_lock); | 299 | mutex_lock(&tpg->acl_node_mutex); |
459 | if (acl->dynamic_node_acl) { | 300 | if (acl->dynamic_node_acl) { |
460 | acl->dynamic_node_acl = 0; | 301 | acl->dynamic_node_acl = 0; |
461 | } | 302 | } |
462 | list_del(&acl->acl_list); | 303 | list_del(&acl->acl_list); |
463 | tpg->num_node_acls--; | 304 | tpg->num_node_acls--; |
464 | spin_unlock_irq(&tpg->acl_node_lock); | 305 | mutex_unlock(&tpg->acl_node_mutex); |
465 | 306 | ||
466 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); | 307 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); |
467 | acl->acl_stop = 1; | 308 | acl->acl_stop = 1; |
@@ -493,7 +334,6 @@ int core_tpg_del_initiator_node_acl( | |||
493 | wait_for_completion(&acl->acl_free_comp); | 334 | wait_for_completion(&acl->acl_free_comp); |
494 | 335 | ||
495 | core_tpg_wait_for_nacl_pr_ref(acl); | 336 | core_tpg_wait_for_nacl_pr_ref(acl); |
496 | core_clear_initiator_node_from_tpg(acl, tpg); | ||
497 | core_free_device_list_for_node(acl, tpg); | 337 | core_free_device_list_for_node(acl, tpg); |
498 | 338 | ||
499 | pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" | 339 | pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" |
@@ -501,9 +341,8 @@ int core_tpg_del_initiator_node_acl( | |||
501 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, | 341 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
502 | tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); | 342 | tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); |
503 | 343 | ||
504 | return 0; | 344 | kfree(acl); |
505 | } | 345 | } |
506 | EXPORT_SYMBOL(core_tpg_del_initiator_node_acl); | ||
507 | 346 | ||
508 | /* core_tpg_set_initiator_node_queue_depth(): | 347 | /* core_tpg_set_initiator_node_queue_depth(): |
509 | * | 348 | * |
@@ -520,21 +359,21 @@ int core_tpg_set_initiator_node_queue_depth( | |||
520 | unsigned long flags; | 359 | unsigned long flags; |
521 | int dynamic_acl = 0; | 360 | int dynamic_acl = 0; |
522 | 361 | ||
523 | spin_lock_irq(&tpg->acl_node_lock); | 362 | mutex_lock(&tpg->acl_node_mutex); |
524 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 363 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
525 | if (!acl) { | 364 | if (!acl) { |
526 | pr_err("Access Control List entry for %s Initiator" | 365 | pr_err("Access Control List entry for %s Initiator" |
527 | " Node %s does not exists for TPG %hu, ignoring" | 366 | " Node %s does not exists for TPG %hu, ignoring" |
528 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), | 367 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
529 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 368 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
530 | spin_unlock_irq(&tpg->acl_node_lock); | 369 | mutex_unlock(&tpg->acl_node_mutex); |
531 | return -ENODEV; | 370 | return -ENODEV; |
532 | } | 371 | } |
533 | if (acl->dynamic_node_acl) { | 372 | if (acl->dynamic_node_acl) { |
534 | acl->dynamic_node_acl = 0; | 373 | acl->dynamic_node_acl = 0; |
535 | dynamic_acl = 1; | 374 | dynamic_acl = 1; |
536 | } | 375 | } |
537 | spin_unlock_irq(&tpg->acl_node_lock); | 376 | mutex_unlock(&tpg->acl_node_mutex); |
538 | 377 | ||
539 | spin_lock_irqsave(&tpg->session_lock, flags); | 378 | spin_lock_irqsave(&tpg->session_lock, flags); |
540 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { | 379 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { |
@@ -550,10 +389,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
550 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); | 389 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
551 | spin_unlock_irqrestore(&tpg->session_lock, flags); | 390 | spin_unlock_irqrestore(&tpg->session_lock, flags); |
552 | 391 | ||
553 | spin_lock_irq(&tpg->acl_node_lock); | 392 | mutex_lock(&tpg->acl_node_mutex); |
554 | if (dynamic_acl) | 393 | if (dynamic_acl) |
555 | acl->dynamic_node_acl = 1; | 394 | acl->dynamic_node_acl = 1; |
556 | spin_unlock_irq(&tpg->acl_node_lock); | 395 | mutex_unlock(&tpg->acl_node_mutex); |
557 | return -EEXIST; | 396 | return -EEXIST; |
558 | } | 397 | } |
559 | /* | 398 | /* |
@@ -588,10 +427,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
588 | if (init_sess) | 427 | if (init_sess) |
589 | tpg->se_tpg_tfo->close_session(init_sess); | 428 | tpg->se_tpg_tfo->close_session(init_sess); |
590 | 429 | ||
591 | spin_lock_irq(&tpg->acl_node_lock); | 430 | mutex_lock(&tpg->acl_node_mutex); |
592 | if (dynamic_acl) | 431 | if (dynamic_acl) |
593 | acl->dynamic_node_acl = 1; | 432 | acl->dynamic_node_acl = 1; |
594 | spin_unlock_irq(&tpg->acl_node_lock); | 433 | mutex_unlock(&tpg->acl_node_mutex); |
595 | return -EINVAL; | 434 | return -EINVAL; |
596 | } | 435 | } |
597 | spin_unlock_irqrestore(&tpg->session_lock, flags); | 436 | spin_unlock_irqrestore(&tpg->session_lock, flags); |
@@ -607,10 +446,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
607 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), | 446 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), |
608 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | 447 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
609 | 448 | ||
610 | spin_lock_irq(&tpg->acl_node_lock); | 449 | mutex_lock(&tpg->acl_node_mutex); |
611 | if (dynamic_acl) | 450 | if (dynamic_acl) |
612 | acl->dynamic_node_acl = 1; | 451 | acl->dynamic_node_acl = 1; |
613 | spin_unlock_irq(&tpg->acl_node_lock); | 452 | mutex_unlock(&tpg->acl_node_mutex); |
614 | 453 | ||
615 | return 0; | 454 | return 0; |
616 | } | 455 | } |
@@ -646,78 +485,54 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref) | |||
646 | complete(&lun->lun_ref_comp); | 485 | complete(&lun->lun_ref_comp); |
647 | } | 486 | } |
648 | 487 | ||
649 | static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | ||
650 | { | ||
651 | /* Set in core_dev_setup_virtual_lun0() */ | ||
652 | struct se_device *dev = g_lun0_dev; | ||
653 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; | ||
654 | u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
655 | int ret; | ||
656 | |||
657 | lun->unpacked_lun = 0; | ||
658 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | ||
659 | atomic_set(&lun->lun_acl_count, 0); | ||
660 | init_completion(&lun->lun_shutdown_comp); | ||
661 | INIT_LIST_HEAD(&lun->lun_acl_list); | ||
662 | spin_lock_init(&lun->lun_acl_lock); | ||
663 | spin_lock_init(&lun->lun_sep_lock); | ||
664 | init_completion(&lun->lun_ref_comp); | ||
665 | |||
666 | ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev); | ||
667 | if (ret < 0) | ||
668 | return ret; | ||
669 | |||
670 | return 0; | ||
671 | } | ||
672 | |||
673 | int core_tpg_register( | 488 | int core_tpg_register( |
674 | const struct target_core_fabric_ops *tfo, | ||
675 | struct se_wwn *se_wwn, | 489 | struct se_wwn *se_wwn, |
676 | struct se_portal_group *se_tpg, | 490 | struct se_portal_group *se_tpg, |
677 | void *tpg_fabric_ptr, | 491 | int proto_id) |
678 | int se_tpg_type) | ||
679 | { | 492 | { |
680 | struct se_lun *lun; | 493 | int ret; |
681 | u32 i; | 494 | |
682 | 495 | if (!se_tpg) | |
683 | se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG, | 496 | return -EINVAL; |
684 | sizeof(struct se_lun), GFP_KERNEL); | 497 | /* |
685 | if (!se_tpg->tpg_lun_list) { | 498 | * For the typical case where core_tpg_register() is called by a |
686 | pr_err("Unable to allocate struct se_portal_group->" | 499 | * fabric driver from target_core_fabric_ops->fabric_make_tpg() |
687 | "tpg_lun_list\n"); | 500 | * configfs context, use the original tf_ops pointer already saved |
688 | return -ENOMEM; | 501 | * by target-core in target_fabric_make_wwn(). |
689 | } | 502 | * |
503 | * Otherwise, for special cases like iscsi-target discovery TPGs | ||
504 | * the caller is responsible for setting ->se_tpg_tfo ahead of | ||
505 | * calling core_tpg_register(). | ||
506 | */ | ||
507 | if (se_wwn) | ||
508 | se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops; | ||
690 | 509 | ||
691 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 510 | if (!se_tpg->se_tpg_tfo) { |
692 | lun = se_tpg->tpg_lun_list[i]; | 511 | pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n"); |
693 | lun->unpacked_lun = i; | 512 | return -EINVAL; |
694 | lun->lun_link_magic = SE_LUN_LINK_MAGIC; | ||
695 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | ||
696 | atomic_set(&lun->lun_acl_count, 0); | ||
697 | init_completion(&lun->lun_shutdown_comp); | ||
698 | INIT_LIST_HEAD(&lun->lun_acl_list); | ||
699 | spin_lock_init(&lun->lun_acl_lock); | ||
700 | spin_lock_init(&lun->lun_sep_lock); | ||
701 | init_completion(&lun->lun_ref_comp); | ||
702 | } | 513 | } |
703 | 514 | ||
704 | se_tpg->se_tpg_type = se_tpg_type; | 515 | INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist); |
705 | se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr; | 516 | se_tpg->proto_id = proto_id; |
706 | se_tpg->se_tpg_tfo = tfo; | ||
707 | se_tpg->se_tpg_wwn = se_wwn; | 517 | se_tpg->se_tpg_wwn = se_wwn; |
708 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); | 518 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); |
709 | INIT_LIST_HEAD(&se_tpg->acl_node_list); | 519 | INIT_LIST_HEAD(&se_tpg->acl_node_list); |
710 | INIT_LIST_HEAD(&se_tpg->se_tpg_node); | 520 | INIT_LIST_HEAD(&se_tpg->se_tpg_node); |
711 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); | 521 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); |
712 | spin_lock_init(&se_tpg->acl_node_lock); | ||
713 | spin_lock_init(&se_tpg->session_lock); | 522 | spin_lock_init(&se_tpg->session_lock); |
714 | spin_lock_init(&se_tpg->tpg_lun_lock); | 523 | mutex_init(&se_tpg->tpg_lun_mutex); |
715 | 524 | mutex_init(&se_tpg->acl_node_mutex); | |
716 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { | 525 | |
717 | if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { | 526 | if (se_tpg->proto_id >= 0) { |
718 | array_free(se_tpg->tpg_lun_list, | 527 | se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0); |
719 | TRANSPORT_MAX_LUNS_PER_TPG); | 528 | if (IS_ERR(se_tpg->tpg_virt_lun0)) |
720 | return -ENOMEM; | 529 | return PTR_ERR(se_tpg->tpg_virt_lun0); |
530 | |||
531 | ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, | ||
532 | TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev); | ||
533 | if (ret < 0) { | ||
534 | kfree(se_tpg->tpg_virt_lun0); | ||
535 | return ret; | ||
721 | } | 536 | } |
722 | } | 537 | } |
723 | 538 | ||
@@ -725,11 +540,11 @@ int core_tpg_register( | |||
725 | list_add_tail(&se_tpg->se_tpg_node, &tpg_list); | 540 | list_add_tail(&se_tpg->se_tpg_node, &tpg_list); |
726 | spin_unlock_bh(&tpg_lock); | 541 | spin_unlock_bh(&tpg_lock); |
727 | 542 | ||
728 | pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" | 543 | pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, " |
729 | " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), | 544 | "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(), |
730 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 545 | se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ? |
731 | "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? | 546 | se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL, |
732 | "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); | 547 | se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); |
733 | 548 | ||
734 | return 0; | 549 | return 0; |
735 | } | 550 | } |
@@ -737,14 +552,14 @@ EXPORT_SYMBOL(core_tpg_register); | |||
737 | 552 | ||
738 | int core_tpg_deregister(struct se_portal_group *se_tpg) | 553 | int core_tpg_deregister(struct se_portal_group *se_tpg) |
739 | { | 554 | { |
555 | const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; | ||
740 | struct se_node_acl *nacl, *nacl_tmp; | 556 | struct se_node_acl *nacl, *nacl_tmp; |
557 | LIST_HEAD(node_list); | ||
741 | 558 | ||
742 | pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | 559 | pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, " |
743 | " for endpoint: %s Portal Tag %u\n", | 560 | "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(), |
744 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 561 | tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL, |
745 | "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), | 562 | se_tpg->proto_id, tfo->tpg_get_tag(se_tpg)); |
746 | se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), | ||
747 | se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); | ||
748 | 563 | ||
749 | spin_lock_bh(&tpg_lock); | 564 | spin_lock_bh(&tpg_lock); |
750 | list_del(&se_tpg->se_tpg_node); | 565 | list_del(&se_tpg->se_tpg_node); |
@@ -752,61 +567,56 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
752 | 567 | ||
753 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | 568 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) |
754 | cpu_relax(); | 569 | cpu_relax(); |
570 | |||
571 | mutex_lock(&se_tpg->acl_node_mutex); | ||
572 | list_splice_init(&se_tpg->acl_node_list, &node_list); | ||
573 | mutex_unlock(&se_tpg->acl_node_mutex); | ||
755 | /* | 574 | /* |
756 | * Release any remaining demo-mode generated se_node_acl that have | 575 | * Release any remaining demo-mode generated se_node_acl that have |
757 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 | 576 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 |
758 | * in transport_deregister_session(). | 577 | * in transport_deregister_session(). |
759 | */ | 578 | */ |
760 | spin_lock_irq(&se_tpg->acl_node_lock); | 579 | list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { |
761 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, | ||
762 | acl_list) { | ||
763 | list_del(&nacl->acl_list); | 580 | list_del(&nacl->acl_list); |
764 | se_tpg->num_node_acls--; | 581 | se_tpg->num_node_acls--; |
765 | spin_unlock_irq(&se_tpg->acl_node_lock); | ||
766 | 582 | ||
767 | core_tpg_wait_for_nacl_pr_ref(nacl); | 583 | core_tpg_wait_for_nacl_pr_ref(nacl); |
768 | core_free_device_list_for_node(nacl, se_tpg); | 584 | core_free_device_list_for_node(nacl, se_tpg); |
769 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); | 585 | kfree(nacl); |
770 | |||
771 | spin_lock_irq(&se_tpg->acl_node_lock); | ||
772 | } | 586 | } |
773 | spin_unlock_irq(&se_tpg->acl_node_lock); | ||
774 | 587 | ||
775 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | 588 | if (se_tpg->proto_id >= 0) { |
776 | core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0); | 589 | core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); |
590 | kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head); | ||
591 | } | ||
777 | 592 | ||
778 | se_tpg->se_tpg_fabric_ptr = NULL; | ||
779 | array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG); | ||
780 | return 0; | 593 | return 0; |
781 | } | 594 | } |
782 | EXPORT_SYMBOL(core_tpg_deregister); | 595 | EXPORT_SYMBOL(core_tpg_deregister); |
783 | 596 | ||
784 | struct se_lun *core_tpg_alloc_lun( | 597 | struct se_lun *core_tpg_alloc_lun( |
785 | struct se_portal_group *tpg, | 598 | struct se_portal_group *tpg, |
786 | u32 unpacked_lun) | 599 | u64 unpacked_lun) |
787 | { | 600 | { |
788 | struct se_lun *lun; | 601 | struct se_lun *lun; |
789 | 602 | ||
790 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 603 | lun = kzalloc(sizeof(*lun), GFP_KERNEL); |
791 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | 604 | if (!lun) { |
792 | "-1: %u for Target Portal Group: %u\n", | 605 | pr_err("Unable to allocate se_lun memory\n"); |
793 | tpg->se_tpg_tfo->get_fabric_name(), | 606 | return ERR_PTR(-ENOMEM); |
794 | unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
795 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
796 | return ERR_PTR(-EOVERFLOW); | ||
797 | } | ||
798 | |||
799 | spin_lock(&tpg->tpg_lun_lock); | ||
800 | lun = tpg->tpg_lun_list[unpacked_lun]; | ||
801 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { | ||
802 | pr_err("TPG Logical Unit Number: %u is already active" | ||
803 | " on %s Target Portal Group: %u, ignoring request.\n", | ||
804 | unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), | ||
805 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | ||
806 | spin_unlock(&tpg->tpg_lun_lock); | ||
807 | return ERR_PTR(-EINVAL); | ||
808 | } | 607 | } |
809 | spin_unlock(&tpg->tpg_lun_lock); | 608 | lun->unpacked_lun = unpacked_lun; |
609 | lun->lun_link_magic = SE_LUN_LINK_MAGIC; | ||
610 | atomic_set(&lun->lun_acl_count, 0); | ||
611 | init_completion(&lun->lun_ref_comp); | ||
612 | INIT_LIST_HEAD(&lun->lun_deve_list); | ||
613 | INIT_LIST_HEAD(&lun->lun_dev_link); | ||
614 | atomic_set(&lun->lun_tg_pt_secondary_offline, 0); | ||
615 | spin_lock_init(&lun->lun_deve_lock); | ||
616 | mutex_init(&lun->lun_tg_pt_md_mutex); | ||
617 | INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link); | ||
618 | spin_lock_init(&lun->lun_tg_pt_gp_lock); | ||
619 | lun->lun_tpg = tpg; | ||
810 | 620 | ||
811 | return lun; | 621 | return lun; |
812 | } | 622 | } |
@@ -822,34 +632,70 @@ int core_tpg_add_lun( | |||
822 | ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0, | 632 | ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0, |
823 | GFP_KERNEL); | 633 | GFP_KERNEL); |
824 | if (ret < 0) | 634 | if (ret < 0) |
825 | return ret; | 635 | goto out; |
826 | 636 | ||
827 | ret = core_dev_export(dev, tpg, lun); | 637 | ret = core_alloc_rtpi(lun, dev); |
828 | if (ret < 0) { | 638 | if (ret) |
829 | percpu_ref_exit(&lun->lun_ref); | 639 | goto out_kill_ref; |
830 | return ret; | 640 | |
831 | } | 641 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && |
642 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | ||
643 | target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); | ||
644 | |||
645 | mutex_lock(&tpg->tpg_lun_mutex); | ||
646 | |||
647 | spin_lock(&dev->se_port_lock); | ||
648 | lun->lun_index = dev->dev_index; | ||
649 | rcu_assign_pointer(lun->lun_se_dev, dev); | ||
650 | dev->export_count++; | ||
651 | list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); | ||
652 | spin_unlock(&dev->se_port_lock); | ||
832 | 653 | ||
833 | spin_lock(&tpg->tpg_lun_lock); | ||
834 | lun->lun_access = lun_access; | 654 | lun->lun_access = lun_access; |
835 | lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; | 655 | if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
836 | spin_unlock(&tpg->tpg_lun_lock); | 656 | hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); |
657 | mutex_unlock(&tpg->tpg_lun_mutex); | ||
837 | 658 | ||
838 | return 0; | 659 | return 0; |
660 | |||
661 | out_kill_ref: | ||
662 | percpu_ref_exit(&lun->lun_ref); | ||
663 | out: | ||
664 | return ret; | ||
839 | } | 665 | } |
840 | 666 | ||
841 | void core_tpg_remove_lun( | 667 | void core_tpg_remove_lun( |
842 | struct se_portal_group *tpg, | 668 | struct se_portal_group *tpg, |
843 | struct se_lun *lun) | 669 | struct se_lun *lun) |
844 | { | 670 | { |
671 | /* | ||
672 | * rcu_dereference_raw protected by se_lun->lun_group symlink | ||
673 | * reference to se_device->dev_group. | ||
674 | */ | ||
675 | struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); | ||
676 | |||
845 | core_clear_lun_from_tpg(lun, tpg); | 677 | core_clear_lun_from_tpg(lun, tpg); |
678 | /* | ||
679 | * Wait for any active I/O references to percpu se_lun->lun_ref to | ||
680 | * be released. Also, se_lun->lun_ref is now used by PR and ALUA | ||
681 | * logic when referencing a remote target port during ALL_TGT_PT=1 | ||
682 | * and generating UNIT_ATTENTIONs for ALUA access state transition. | ||
683 | */ | ||
846 | transport_clear_lun_ref(lun); | 684 | transport_clear_lun_ref(lun); |
847 | 685 | ||
848 | core_dev_unexport(lun->lun_se_dev, tpg, lun); | 686 | mutex_lock(&tpg->tpg_lun_mutex); |
687 | if (lun->lun_se_dev) { | ||
688 | target_detach_tg_pt_gp(lun); | ||
849 | 689 | ||
850 | spin_lock(&tpg->tpg_lun_lock); | 690 | spin_lock(&dev->se_port_lock); |
851 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | 691 | list_del(&lun->lun_dev_link); |
852 | spin_unlock(&tpg->tpg_lun_lock); | 692 | dev->export_count--; |
693 | rcu_assign_pointer(lun->lun_se_dev, NULL); | ||
694 | spin_unlock(&dev->se_port_lock); | ||
695 | } | ||
696 | if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | ||
697 | hlist_del_rcu(&lun->link); | ||
698 | mutex_unlock(&tpg->tpg_lun_mutex); | ||
853 | 699 | ||
854 | percpu_ref_exit(&lun->lun_ref); | 700 | percpu_ref_exit(&lun->lun_ref); |
855 | } | 701 | } |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index cd3bfc16d25f..ce8574b7220c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <target/target_core_base.h> | 43 | #include <target/target_core_base.h> |
44 | #include <target/target_core_backend.h> | 44 | #include <target/target_core_backend.h> |
45 | #include <target/target_core_fabric.h> | 45 | #include <target/target_core_fabric.h> |
46 | #include <target/target_core_configfs.h> | ||
47 | 46 | ||
48 | #include "target_core_internal.h" | 47 | #include "target_core_internal.h" |
49 | #include "target_core_alua.h" | 48 | #include "target_core_alua.h" |
@@ -60,7 +59,6 @@ struct kmem_cache *t10_pr_reg_cache; | |||
60 | struct kmem_cache *t10_alua_lu_gp_cache; | 59 | struct kmem_cache *t10_alua_lu_gp_cache; |
61 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | 60 | struct kmem_cache *t10_alua_lu_gp_mem_cache; |
62 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | 61 | struct kmem_cache *t10_alua_tg_pt_gp_cache; |
63 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | ||
64 | struct kmem_cache *t10_alua_lba_map_cache; | 62 | struct kmem_cache *t10_alua_lba_map_cache; |
65 | struct kmem_cache *t10_alua_lba_map_mem_cache; | 63 | struct kmem_cache *t10_alua_lba_map_mem_cache; |
66 | 64 | ||
@@ -119,16 +117,6 @@ int init_se_kmem_caches(void) | |||
119 | "cache failed\n"); | 117 | "cache failed\n"); |
120 | goto out_free_lu_gp_mem_cache; | 118 | goto out_free_lu_gp_mem_cache; |
121 | } | 119 | } |
122 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | ||
123 | "t10_alua_tg_pt_gp_mem_cache", | ||
124 | sizeof(struct t10_alua_tg_pt_gp_member), | ||
125 | __alignof__(struct t10_alua_tg_pt_gp_member), | ||
126 | 0, NULL); | ||
127 | if (!t10_alua_tg_pt_gp_mem_cache) { | ||
128 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | ||
129 | "mem_t failed\n"); | ||
130 | goto out_free_tg_pt_gp_cache; | ||
131 | } | ||
132 | t10_alua_lba_map_cache = kmem_cache_create( | 120 | t10_alua_lba_map_cache = kmem_cache_create( |
133 | "t10_alua_lba_map_cache", | 121 | "t10_alua_lba_map_cache", |
134 | sizeof(struct t10_alua_lba_map), | 122 | sizeof(struct t10_alua_lba_map), |
@@ -136,7 +124,7 @@ int init_se_kmem_caches(void) | |||
136 | if (!t10_alua_lba_map_cache) { | 124 | if (!t10_alua_lba_map_cache) { |
137 | pr_err("kmem_cache_create() for t10_alua_lba_map_" | 125 | pr_err("kmem_cache_create() for t10_alua_lba_map_" |
138 | "cache failed\n"); | 126 | "cache failed\n"); |
139 | goto out_free_tg_pt_gp_mem_cache; | 127 | goto out_free_tg_pt_gp_cache; |
140 | } | 128 | } |
141 | t10_alua_lba_map_mem_cache = kmem_cache_create( | 129 | t10_alua_lba_map_mem_cache = kmem_cache_create( |
142 | "t10_alua_lba_map_mem_cache", | 130 | "t10_alua_lba_map_mem_cache", |
@@ -159,8 +147,6 @@ out_free_lba_map_mem_cache: | |||
159 | kmem_cache_destroy(t10_alua_lba_map_mem_cache); | 147 | kmem_cache_destroy(t10_alua_lba_map_mem_cache); |
160 | out_free_lba_map_cache: | 148 | out_free_lba_map_cache: |
161 | kmem_cache_destroy(t10_alua_lba_map_cache); | 149 | kmem_cache_destroy(t10_alua_lba_map_cache); |
162 | out_free_tg_pt_gp_mem_cache: | ||
163 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | ||
164 | out_free_tg_pt_gp_cache: | 150 | out_free_tg_pt_gp_cache: |
165 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 151 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
166 | out_free_lu_gp_mem_cache: | 152 | out_free_lu_gp_mem_cache: |
@@ -186,7 +172,6 @@ void release_se_kmem_caches(void) | |||
186 | kmem_cache_destroy(t10_alua_lu_gp_cache); | 172 | kmem_cache_destroy(t10_alua_lu_gp_cache); |
187 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 173 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
188 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 174 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
189 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | ||
190 | kmem_cache_destroy(t10_alua_lba_map_cache); | 175 | kmem_cache_destroy(t10_alua_lba_map_cache); |
191 | kmem_cache_destroy(t10_alua_lba_map_mem_cache); | 176 | kmem_cache_destroy(t10_alua_lba_map_mem_cache); |
192 | } | 177 | } |
@@ -406,12 +391,6 @@ EXPORT_SYMBOL(target_get_session); | |||
406 | 391 | ||
407 | void target_put_session(struct se_session *se_sess) | 392 | void target_put_session(struct se_session *se_sess) |
408 | { | 393 | { |
409 | struct se_portal_group *tpg = se_sess->se_tpg; | ||
410 | |||
411 | if (tpg->se_tpg_tfo->put_session != NULL) { | ||
412 | tpg->se_tpg_tfo->put_session(se_sess); | ||
413 | return; | ||
414 | } | ||
415 | kref_put(&se_sess->sess_kref, target_release_session); | 394 | kref_put(&se_sess->sess_kref, target_release_session); |
416 | } | 395 | } |
417 | EXPORT_SYMBOL(target_put_session); | 396 | EXPORT_SYMBOL(target_put_session); |
@@ -498,7 +477,7 @@ void transport_deregister_session(struct se_session *se_sess) | |||
498 | const struct target_core_fabric_ops *se_tfo; | 477 | const struct target_core_fabric_ops *se_tfo; |
499 | struct se_node_acl *se_nacl; | 478 | struct se_node_acl *se_nacl; |
500 | unsigned long flags; | 479 | unsigned long flags; |
501 | bool comp_nacl = true; | 480 | bool comp_nacl = true, drop_nacl = false; |
502 | 481 | ||
503 | if (!se_tpg) { | 482 | if (!se_tpg) { |
504 | transport_free_session(se_sess); | 483 | transport_free_session(se_sess); |
@@ -518,22 +497,22 @@ void transport_deregister_session(struct se_session *se_sess) | |||
518 | */ | 497 | */ |
519 | se_nacl = se_sess->se_node_acl; | 498 | se_nacl = se_sess->se_node_acl; |
520 | 499 | ||
521 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | 500 | mutex_lock(&se_tpg->acl_node_mutex); |
522 | if (se_nacl && se_nacl->dynamic_node_acl) { | 501 | if (se_nacl && se_nacl->dynamic_node_acl) { |
523 | if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { | 502 | if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { |
524 | list_del(&se_nacl->acl_list); | 503 | list_del(&se_nacl->acl_list); |
525 | se_tpg->num_node_acls--; | 504 | se_tpg->num_node_acls--; |
526 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | 505 | drop_nacl = true; |
527 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | ||
528 | core_free_device_list_for_node(se_nacl, se_tpg); | ||
529 | se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); | ||
530 | |||
531 | comp_nacl = false; | ||
532 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | ||
533 | } | 506 | } |
534 | } | 507 | } |
535 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | 508 | mutex_unlock(&se_tpg->acl_node_mutex); |
536 | 509 | ||
510 | if (drop_nacl) { | ||
511 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | ||
512 | core_free_device_list_for_node(se_nacl, se_tpg); | ||
513 | kfree(se_nacl); | ||
514 | comp_nacl = false; | ||
515 | } | ||
537 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", | 516 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
538 | se_tpg->se_tpg_tfo->get_fabric_name()); | 517 | se_tpg->se_tpg_tfo->get_fabric_name()); |
539 | /* | 518 | /* |
@@ -593,9 +572,8 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, | |||
593 | * this command for frontend exceptions. | 572 | * this command for frontend exceptions. |
594 | */ | 573 | */ |
595 | if (cmd->transport_state & CMD_T_STOP) { | 574 | if (cmd->transport_state & CMD_T_STOP) { |
596 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", | 575 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", |
597 | __func__, __LINE__, | 576 | __func__, __LINE__, cmd->tag); |
598 | cmd->se_tfo->get_task_tag(cmd)); | ||
599 | 577 | ||
600 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 578 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
601 | 579 | ||
@@ -1148,6 +1126,8 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) | |||
1148 | /* | 1126 | /* |
1149 | * Used by fabric modules containing a local struct se_cmd within their | 1127 | * Used by fabric modules containing a local struct se_cmd within their |
1150 | * fabric dependent per I/O descriptor. | 1128 | * fabric dependent per I/O descriptor. |
1129 | * | ||
1130 | * Preserves the value of @cmd->tag. | ||
1151 | */ | 1131 | */ |
1152 | void transport_init_se_cmd( | 1132 | void transport_init_se_cmd( |
1153 | struct se_cmd *cmd, | 1133 | struct se_cmd *cmd, |
@@ -1274,11 +1254,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) | |||
1274 | return ret; | 1254 | return ret; |
1275 | 1255 | ||
1276 | cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | 1256 | cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
1277 | 1257 | atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); | |
1278 | spin_lock(&cmd->se_lun->lun_sep_lock); | ||
1279 | if (cmd->se_lun->lun_sep) | ||
1280 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | ||
1281 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
1282 | return 0; | 1258 | return 0; |
1283 | } | 1259 | } |
1284 | EXPORT_SYMBOL(target_setup_cmd_from_cdb); | 1260 | EXPORT_SYMBOL(target_setup_cmd_from_cdb); |
@@ -1346,11 +1322,9 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
1346 | 1322 | ||
1347 | cmd->t_data_sg = sgl; | 1323 | cmd->t_data_sg = sgl; |
1348 | cmd->t_data_nents = sgl_count; | 1324 | cmd->t_data_nents = sgl_count; |
1325 | cmd->t_bidi_data_sg = sgl_bidi; | ||
1326 | cmd->t_bidi_data_nents = sgl_bidi_count; | ||
1349 | 1327 | ||
1350 | if (sgl_bidi && sgl_bidi_count) { | ||
1351 | cmd->t_bidi_data_sg = sgl_bidi; | ||
1352 | cmd->t_bidi_data_nents = sgl_bidi_count; | ||
1353 | } | ||
1354 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | 1328 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; |
1355 | return 0; | 1329 | return 0; |
1356 | } | 1330 | } |
@@ -1375,6 +1349,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
1375 | * @sgl_prot: struct scatterlist memory protection information | 1349 | * @sgl_prot: struct scatterlist memory protection information |
1376 | * @sgl_prot_count: scatterlist count for protection information | 1350 | * @sgl_prot_count: scatterlist count for protection information |
1377 | * | 1351 | * |
1352 | * Task tags are supported if the caller has set @se_cmd->tag. | ||
1353 | * | ||
1378 | * Returns non zero to signal active I/O shutdown failure. All other | 1354 | * Returns non zero to signal active I/O shutdown failure. All other |
1379 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, | 1355 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, |
1380 | * but still return zero here. | 1356 | * but still return zero here. |
@@ -1383,7 +1359,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
1383 | * assumes internal allocation of fabric payload buffer by target-core. | 1359 | * assumes internal allocation of fabric payload buffer by target-core. |
1384 | */ | 1360 | */ |
1385 | int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, | 1361 | int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, |
1386 | unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, | 1362 | unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, |
1387 | u32 data_length, int task_attr, int data_dir, int flags, | 1363 | u32 data_length, int task_attr, int data_dir, int flags, |
1388 | struct scatterlist *sgl, u32 sgl_count, | 1364 | struct scatterlist *sgl, u32 sgl_count, |
1389 | struct scatterlist *sgl_bidi, u32 sgl_bidi_count, | 1365 | struct scatterlist *sgl_bidi, u32 sgl_bidi_count, |
@@ -1412,7 +1388,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess | |||
1412 | * for fabrics using TARGET_SCF_ACK_KREF that expect a second | 1388 | * for fabrics using TARGET_SCF_ACK_KREF that expect a second |
1413 | * kref_put() to happen during fabric packet acknowledgement. | 1389 | * kref_put() to happen during fabric packet acknowledgement. |
1414 | */ | 1390 | */ |
1415 | ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | 1391 | ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); |
1416 | if (ret) | 1392 | if (ret) |
1417 | return ret; | 1393 | return ret; |
1418 | /* | 1394 | /* |
@@ -1426,7 +1402,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess | |||
1426 | rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); | 1402 | rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); |
1427 | if (rc) { | 1403 | if (rc) { |
1428 | transport_send_check_condition_and_sense(se_cmd, rc, 0); | 1404 | transport_send_check_condition_and_sense(se_cmd, rc, 0); |
1429 | target_put_sess_cmd(se_sess, se_cmd); | 1405 | target_put_sess_cmd(se_cmd); |
1430 | return 0; | 1406 | return 0; |
1431 | } | 1407 | } |
1432 | 1408 | ||
@@ -1443,6 +1419,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess | |||
1443 | if (sgl_prot_count) { | 1419 | if (sgl_prot_count) { |
1444 | se_cmd->t_prot_sg = sgl_prot; | 1420 | se_cmd->t_prot_sg = sgl_prot; |
1445 | se_cmd->t_prot_nents = sgl_prot_count; | 1421 | se_cmd->t_prot_nents = sgl_prot_count; |
1422 | se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; | ||
1446 | } | 1423 | } |
1447 | 1424 | ||
1448 | /* | 1425 | /* |
@@ -1506,6 +1483,8 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls); | |||
1506 | * @data_dir: DMA data direction | 1483 | * @data_dir: DMA data direction |
1507 | * @flags: flags for command submission from target_sc_flags_tables | 1484 | * @flags: flags for command submission from target_sc_flags_tables |
1508 | * | 1485 | * |
1486 | * Task tags are supported if the caller has set @se_cmd->tag. | ||
1487 | * | ||
1509 | * Returns non zero to signal active I/O shutdown failure. All other | 1488 | * Returns non zero to signal active I/O shutdown failure. All other |
1510 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, | 1489 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, |
1511 | * but still return zero here. | 1490 | * but still return zero here. |
@@ -1516,7 +1495,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls); | |||
1516 | * It also assumes interal target core SGL memory allocation. | 1495 | * It also assumes interal target core SGL memory allocation. |
1517 | */ | 1496 | */ |
1518 | int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | 1497 | int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, |
1519 | unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, | 1498 | unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, |
1520 | u32 data_length, int task_attr, int data_dir, int flags) | 1499 | u32 data_length, int task_attr, int data_dir, int flags) |
1521 | { | 1500 | { |
1522 | return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, | 1501 | return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, |
@@ -1553,7 +1532,7 @@ static void target_complete_tmr_failure(struct work_struct *work) | |||
1553 | **/ | 1532 | **/ |
1554 | 1533 | ||
1555 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | 1534 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, |
1556 | unsigned char *sense, u32 unpacked_lun, | 1535 | unsigned char *sense, u64 unpacked_lun, |
1557 | void *fabric_tmr_ptr, unsigned char tm_type, | 1536 | void *fabric_tmr_ptr, unsigned char tm_type, |
1558 | gfp_t gfp, unsigned int tag, int flags) | 1537 | gfp_t gfp, unsigned int tag, int flags) |
1559 | { | 1538 | { |
@@ -1577,7 +1556,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
1577 | se_cmd->se_tmr_req->ref_task_tag = tag; | 1556 | se_cmd->se_tmr_req->ref_task_tag = tag; |
1578 | 1557 | ||
1579 | /* See target_submit_cmd for commentary */ | 1558 | /* See target_submit_cmd for commentary */ |
1580 | ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | 1559 | ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); |
1581 | if (ret) { | 1560 | if (ret) { |
1582 | core_tmr_release_req(se_cmd->se_tmr_req); | 1561 | core_tmr_release_req(se_cmd->se_tmr_req); |
1583 | return ret; | 1562 | return ret; |
@@ -1633,9 +1612,8 @@ void transport_generic_request_failure(struct se_cmd *cmd, | |||
1633 | { | 1612 | { |
1634 | int ret = 0; | 1613 | int ret = 0; |
1635 | 1614 | ||
1636 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | 1615 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" |
1637 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), | 1616 | " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); |
1638 | cmd->t_task_cdb[0]); | ||
1639 | pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", | 1617 | pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", |
1640 | cmd->se_tfo->get_cmd_state(cmd), | 1618 | cmd->se_tfo->get_cmd_state(cmd), |
1641 | cmd->t_state, sense_reason); | 1619 | cmd->t_state, sense_reason); |
@@ -1692,13 +1670,13 @@ void transport_generic_request_failure(struct se_cmd *cmd, | |||
1692 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 1670 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
1693 | */ | 1671 | */ |
1694 | if (cmd->se_sess && | 1672 | if (cmd->se_sess && |
1695 | cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) | 1673 | cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { |
1696 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | 1674 | target_ua_allocate_lun(cmd->se_sess->se_node_acl, |
1697 | cmd->orig_fe_lun, 0x2C, | 1675 | cmd->orig_fe_lun, 0x2C, |
1698 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 1676 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
1699 | 1677 | } | |
1700 | trace_target_cmd_complete(cmd); | 1678 | trace_target_cmd_complete(cmd); |
1701 | ret = cmd->se_tfo-> queue_status(cmd); | 1679 | ret = cmd->se_tfo->queue_status(cmd); |
1702 | if (ret == -EAGAIN || ret == -ENOMEM) | 1680 | if (ret == -EAGAIN || ret == -ENOMEM) |
1703 | goto queue_full; | 1681 | goto queue_full; |
1704 | goto check_stop; | 1682 | goto check_stop; |
@@ -1759,8 +1737,8 @@ static int target_write_prot_action(struct se_cmd *cmd) | |||
1759 | break; | 1737 | break; |
1760 | 1738 | ||
1761 | sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); | 1739 | sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); |
1762 | cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba, | 1740 | cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, |
1763 | sectors, 0, NULL, 0); | 1741 | sectors, 0, cmd->t_prot_sg, 0); |
1764 | if (unlikely(cmd->pi_err)) { | 1742 | if (unlikely(cmd->pi_err)) { |
1765 | spin_lock_irq(&cmd->t_state_lock); | 1743 | spin_lock_irq(&cmd->t_state_lock); |
1766 | cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); | 1744 | cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); |
@@ -1843,9 +1821,8 @@ void target_execute_cmd(struct se_cmd *cmd) | |||
1843 | */ | 1821 | */ |
1844 | spin_lock_irq(&cmd->t_state_lock); | 1822 | spin_lock_irq(&cmd->t_state_lock); |
1845 | if (cmd->transport_state & CMD_T_STOP) { | 1823 | if (cmd->transport_state & CMD_T_STOP) { |
1846 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", | 1824 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", |
1847 | __func__, __LINE__, | 1825 | __func__, __LINE__, cmd->tag); |
1848 | cmd->se_tfo->get_task_tag(cmd)); | ||
1849 | 1826 | ||
1850 | spin_unlock_irq(&cmd->t_state_lock); | 1827 | spin_unlock_irq(&cmd->t_state_lock); |
1851 | complete_all(&cmd->t_transport_stop_comp); | 1828 | complete_all(&cmd->t_transport_stop_comp); |
@@ -1984,16 +1961,17 @@ static void transport_handle_queue_full( | |||
1984 | 1961 | ||
1985 | static bool target_read_prot_action(struct se_cmd *cmd) | 1962 | static bool target_read_prot_action(struct se_cmd *cmd) |
1986 | { | 1963 | { |
1987 | sense_reason_t rc; | ||
1988 | |||
1989 | switch (cmd->prot_op) { | 1964 | switch (cmd->prot_op) { |
1990 | case TARGET_PROT_DIN_STRIP: | 1965 | case TARGET_PROT_DIN_STRIP: |
1991 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { | 1966 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { |
1992 | rc = sbc_dif_read_strip(cmd); | 1967 | u32 sectors = cmd->data_length >> |
1993 | if (rc) { | 1968 | ilog2(cmd->se_dev->dev_attrib.block_size); |
1994 | cmd->pi_err = rc; | 1969 | |
1970 | cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, | ||
1971 | sectors, 0, cmd->t_prot_sg, | ||
1972 | 0); | ||
1973 | if (cmd->pi_err) | ||
1995 | return true; | 1974 | return true; |
1996 | } | ||
1997 | } | 1975 | } |
1998 | break; | 1976 | break; |
1999 | case TARGET_PROT_DIN_INSERT: | 1977 | case TARGET_PROT_DIN_INSERT: |
@@ -2072,12 +2050,8 @@ static void target_complete_ok_work(struct work_struct *work) | |||
2072 | queue_rsp: | 2050 | queue_rsp: |
2073 | switch (cmd->data_direction) { | 2051 | switch (cmd->data_direction) { |
2074 | case DMA_FROM_DEVICE: | 2052 | case DMA_FROM_DEVICE: |
2075 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2053 | atomic_long_add(cmd->data_length, |
2076 | if (cmd->se_lun->lun_sep) { | 2054 | &cmd->se_lun->lun_stats.tx_data_octets); |
2077 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | ||
2078 | cmd->data_length; | ||
2079 | } | ||
2080 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
2081 | /* | 2055 | /* |
2082 | * Perform READ_STRIP of PI using software emulation when | 2056 | * Perform READ_STRIP of PI using software emulation when |
2083 | * backend had PI enabled, if the transport will not be | 2057 | * backend had PI enabled, if the transport will not be |
@@ -2100,22 +2074,14 @@ queue_rsp: | |||
2100 | goto queue_full; | 2074 | goto queue_full; |
2101 | break; | 2075 | break; |
2102 | case DMA_TO_DEVICE: | 2076 | case DMA_TO_DEVICE: |
2103 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2077 | atomic_long_add(cmd->data_length, |
2104 | if (cmd->se_lun->lun_sep) { | 2078 | &cmd->se_lun->lun_stats.rx_data_octets); |
2105 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | ||
2106 | cmd->data_length; | ||
2107 | } | ||
2108 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
2109 | /* | 2079 | /* |
2110 | * Check if we need to send READ payload for BIDI-COMMAND | 2080 | * Check if we need to send READ payload for BIDI-COMMAND |
2111 | */ | 2081 | */ |
2112 | if (cmd->se_cmd_flags & SCF_BIDI) { | 2082 | if (cmd->se_cmd_flags & SCF_BIDI) { |
2113 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2083 | atomic_long_add(cmd->data_length, |
2114 | if (cmd->se_lun->lun_sep) { | 2084 | &cmd->se_lun->lun_stats.tx_data_octets); |
2115 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | ||
2116 | cmd->data_length; | ||
2117 | } | ||
2118 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
2119 | ret = cmd->se_tfo->queue_data_in(cmd); | 2085 | ret = cmd->se_tfo->queue_data_in(cmd); |
2120 | if (ret == -EAGAIN || ret == -ENOMEM) | 2086 | if (ret == -EAGAIN || ret == -ENOMEM) |
2121 | goto queue_full; | 2087 | goto queue_full; |
@@ -2172,6 +2138,12 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd) | |||
2172 | 2138 | ||
2173 | static inline void transport_free_pages(struct se_cmd *cmd) | 2139 | static inline void transport_free_pages(struct se_cmd *cmd) |
2174 | { | 2140 | { |
2141 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { | ||
2142 | transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); | ||
2143 | cmd->t_prot_sg = NULL; | ||
2144 | cmd->t_prot_nents = 0; | ||
2145 | } | ||
2146 | |||
2175 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { | 2147 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { |
2176 | /* | 2148 | /* |
2177 | * Release special case READ buffer payload required for | 2149 | * Release special case READ buffer payload required for |
@@ -2195,10 +2167,6 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
2195 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); | 2167 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
2196 | cmd->t_bidi_data_sg = NULL; | 2168 | cmd->t_bidi_data_sg = NULL; |
2197 | cmd->t_bidi_data_nents = 0; | 2169 | cmd->t_bidi_data_nents = 0; |
2198 | |||
2199 | transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); | ||
2200 | cmd->t_prot_sg = NULL; | ||
2201 | cmd->t_prot_nents = 0; | ||
2202 | } | 2170 | } |
2203 | 2171 | ||
2204 | /** | 2172 | /** |
@@ -2220,7 +2188,7 @@ static int transport_release_cmd(struct se_cmd *cmd) | |||
2220 | * If this cmd has been setup with target_get_sess_cmd(), drop | 2188 | * If this cmd has been setup with target_get_sess_cmd(), drop |
2221 | * the kref and call ->release_cmd() in kref callback. | 2189 | * the kref and call ->release_cmd() in kref callback. |
2222 | */ | 2190 | */ |
2223 | return target_put_sess_cmd(cmd->se_sess, cmd); | 2191 | return target_put_sess_cmd(cmd); |
2224 | } | 2192 | } |
2225 | 2193 | ||
2226 | /** | 2194 | /** |
@@ -2337,6 +2305,14 @@ transport_generic_new_cmd(struct se_cmd *cmd) | |||
2337 | int ret = 0; | 2305 | int ret = 0; |
2338 | bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); | 2306 | bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); |
2339 | 2307 | ||
2308 | if (cmd->prot_op != TARGET_PROT_NORMAL && | ||
2309 | !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { | ||
2310 | ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, | ||
2311 | cmd->prot_length, true); | ||
2312 | if (ret < 0) | ||
2313 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
2314 | } | ||
2315 | |||
2340 | /* | 2316 | /* |
2341 | * Determine is the TCM fabric module has already allocated physical | 2317 | * Determine is the TCM fabric module has already allocated physical |
2342 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | 2318 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
@@ -2362,14 +2338,6 @@ transport_generic_new_cmd(struct se_cmd *cmd) | |||
2362 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2338 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2363 | } | 2339 | } |
2364 | 2340 | ||
2365 | if (cmd->prot_op != TARGET_PROT_NORMAL) { | ||
2366 | ret = target_alloc_sgl(&cmd->t_prot_sg, | ||
2367 | &cmd->t_prot_nents, | ||
2368 | cmd->prot_length, true); | ||
2369 | if (ret < 0) | ||
2370 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
2371 | } | ||
2372 | |||
2373 | ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, | 2341 | ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, |
2374 | cmd->data_length, zero_flag); | 2342 | cmd->data_length, zero_flag); |
2375 | if (ret < 0) | 2343 | if (ret < 0) |
@@ -2464,13 +2432,12 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | |||
2464 | EXPORT_SYMBOL(transport_generic_free_cmd); | 2432 | EXPORT_SYMBOL(transport_generic_free_cmd); |
2465 | 2433 | ||
2466 | /* target_get_sess_cmd - Add command to active ->sess_cmd_list | 2434 | /* target_get_sess_cmd - Add command to active ->sess_cmd_list |
2467 | * @se_sess: session to reference | ||
2468 | * @se_cmd: command descriptor to add | 2435 | * @se_cmd: command descriptor to add |
2469 | * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() | 2436 | * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() |
2470 | */ | 2437 | */ |
2471 | int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | 2438 | int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) |
2472 | bool ack_kref) | ||
2473 | { | 2439 | { |
2440 | struct se_session *se_sess = se_cmd->se_sess; | ||
2474 | unsigned long flags; | 2441 | unsigned long flags; |
2475 | int ret = 0; | 2442 | int ret = 0; |
2476 | 2443 | ||
@@ -2492,7 +2459,7 @@ out: | |||
2492 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2459 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2493 | 2460 | ||
2494 | if (ret && ack_kref) | 2461 | if (ret && ack_kref) |
2495 | target_put_sess_cmd(se_sess, se_cmd); | 2462 | target_put_sess_cmd(se_cmd); |
2496 | 2463 | ||
2497 | return ret; | 2464 | return ret; |
2498 | } | 2465 | } |
@@ -2521,11 +2488,12 @@ static void target_release_cmd_kref(struct kref *kref) | |||
2521 | } | 2488 | } |
2522 | 2489 | ||
2523 | /* target_put_sess_cmd - Check for active I/O shutdown via kref_put | 2490 | /* target_put_sess_cmd - Check for active I/O shutdown via kref_put |
2524 | * @se_sess: session to reference | ||
2525 | * @se_cmd: command descriptor to drop | 2491 | * @se_cmd: command descriptor to drop |
2526 | */ | 2492 | */ |
2527 | int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) | 2493 | int target_put_sess_cmd(struct se_cmd *se_cmd) |
2528 | { | 2494 | { |
2495 | struct se_session *se_sess = se_cmd->se_sess; | ||
2496 | |||
2529 | if (!se_sess) { | 2497 | if (!se_sess) { |
2530 | se_cmd->se_tfo->release_cmd(se_cmd); | 2498 | se_cmd->se_tfo->release_cmd(se_cmd); |
2531 | return 1; | 2499 | return 1; |
@@ -2591,31 +2559,10 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) | |||
2591 | } | 2559 | } |
2592 | EXPORT_SYMBOL(target_wait_for_sess_cmds); | 2560 | EXPORT_SYMBOL(target_wait_for_sess_cmds); |
2593 | 2561 | ||
2594 | static int transport_clear_lun_ref_thread(void *p) | 2562 | void transport_clear_lun_ref(struct se_lun *lun) |
2595 | { | 2563 | { |
2596 | struct se_lun *lun = p; | ||
2597 | |||
2598 | percpu_ref_kill(&lun->lun_ref); | 2564 | percpu_ref_kill(&lun->lun_ref); |
2599 | |||
2600 | wait_for_completion(&lun->lun_ref_comp); | 2565 | wait_for_completion(&lun->lun_ref_comp); |
2601 | complete(&lun->lun_shutdown_comp); | ||
2602 | |||
2603 | return 0; | ||
2604 | } | ||
2605 | |||
2606 | int transport_clear_lun_ref(struct se_lun *lun) | ||
2607 | { | ||
2608 | struct task_struct *kt; | ||
2609 | |||
2610 | kt = kthread_run(transport_clear_lun_ref_thread, lun, | ||
2611 | "tcm_cl_%u", lun->unpacked_lun); | ||
2612 | if (IS_ERR(kt)) { | ||
2613 | pr_err("Unable to start clear_lun thread\n"); | ||
2614 | return PTR_ERR(kt); | ||
2615 | } | ||
2616 | wait_for_completion(&lun->lun_shutdown_comp); | ||
2617 | |||
2618 | return 0; | ||
2619 | } | 2566 | } |
2620 | 2567 | ||
2621 | /** | 2568 | /** |
@@ -2649,10 +2596,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
2649 | 2596 | ||
2650 | cmd->transport_state |= CMD_T_STOP; | 2597 | cmd->transport_state |= CMD_T_STOP; |
2651 | 2598 | ||
2652 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" | 2599 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", |
2653 | " i_state: %d, t_state: %d, CMD_T_STOP\n", | 2600 | cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
2654 | cmd, cmd->se_tfo->get_task_tag(cmd), | ||
2655 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | ||
2656 | 2601 | ||
2657 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2602 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2658 | 2603 | ||
@@ -2661,9 +2606,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) | |||
2661 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2606 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2662 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); | 2607 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); |
2663 | 2608 | ||
2664 | pr_debug("wait_for_tasks: Stopped wait_for_completion(" | 2609 | pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", |
2665 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", | 2610 | cmd->tag); |
2666 | cmd->se_tfo->get_task_tag(cmd)); | ||
2667 | 2611 | ||
2668 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2612 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2669 | 2613 | ||
@@ -2965,8 +2909,8 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |||
2965 | if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) | 2909 | if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) |
2966 | return 1; | 2910 | return 1; |
2967 | 2911 | ||
2968 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", | 2912 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", |
2969 | cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); | 2913 | cmd->t_task_cdb[0], cmd->tag); |
2970 | 2914 | ||
2971 | cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; | 2915 | cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; |
2972 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 2916 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
@@ -3005,9 +2949,8 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
3005 | 2949 | ||
3006 | transport_lun_remove_cmd(cmd); | 2950 | transport_lun_remove_cmd(cmd); |
3007 | 2951 | ||
3008 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | 2952 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", |
3009 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], | 2953 | cmd->t_task_cdb[0], cmd->tag); |
3010 | cmd->se_tfo->get_task_tag(cmd)); | ||
3011 | 2954 | ||
3012 | trace_target_cmd_complete(cmd); | 2955 | trace_target_cmd_complete(cmd); |
3013 | cmd->se_tfo->queue_status(cmd); | 2956 | cmd->se_tfo->queue_status(cmd); |
@@ -3033,6 +2976,11 @@ static void target_tmr_work(struct work_struct *work) | |||
3033 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | 2976 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
3034 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | 2977 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : |
3035 | TMR_FUNCTION_REJECTED; | 2978 | TMR_FUNCTION_REJECTED; |
2979 | if (tmr->response == TMR_FUNCTION_COMPLETE) { | ||
2980 | target_ua_allocate_lun(cmd->se_sess->se_node_acl, | ||
2981 | cmd->orig_fe_lun, 0x29, | ||
2982 | ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); | ||
2983 | } | ||
3036 | break; | 2984 | break; |
3037 | case TMR_TARGET_WARM_RESET: | 2985 | case TMR_TARGET_WARM_RESET: |
3038 | tmr->response = TMR_FUNCTION_REJECTED; | 2986 | tmr->response = TMR_FUNCTION_REJECTED; |
@@ -3067,3 +3015,22 @@ int transport_generic_handle_tmr( | |||
3067 | return 0; | 3015 | return 0; |
3068 | } | 3016 | } |
3069 | EXPORT_SYMBOL(transport_generic_handle_tmr); | 3017 | EXPORT_SYMBOL(transport_generic_handle_tmr); |
3018 | |||
3019 | bool | ||
3020 | target_check_wce(struct se_device *dev) | ||
3021 | { | ||
3022 | bool wce = false; | ||
3023 | |||
3024 | if (dev->transport->get_write_cache) | ||
3025 | wce = dev->transport->get_write_cache(dev); | ||
3026 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
3027 | wce = true; | ||
3028 | |||
3029 | return wce; | ||
3030 | } | ||
3031 | |||
3032 | bool | ||
3033 | target_check_fua(struct se_device *dev) | ||
3034 | { | ||
3035 | return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; | ||
3036 | } | ||
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index e44cc94b12cb..be25eb807a5f 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <target/target_core_base.h> | 30 | #include <target/target_core_base.h> |
31 | #include <target/target_core_fabric.h> | 31 | #include <target/target_core_fabric.h> |
32 | #include <target/target_core_configfs.h> | ||
33 | 32 | ||
34 | #include "target_core_internal.h" | 33 | #include "target_core_internal.h" |
35 | #include "target_core_alua.h" | 34 | #include "target_core_alua.h" |
@@ -50,9 +49,17 @@ target_scsi3_ua_check(struct se_cmd *cmd) | |||
50 | if (!nacl) | 49 | if (!nacl) |
51 | return 0; | 50 | return 0; |
52 | 51 | ||
53 | deve = nacl->device_list[cmd->orig_fe_lun]; | 52 | rcu_read_lock(); |
54 | if (!atomic_read(&deve->ua_count)) | 53 | deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); |
54 | if (!deve) { | ||
55 | rcu_read_unlock(); | ||
55 | return 0; | 56 | return 0; |
57 | } | ||
58 | if (!atomic_read(&deve->ua_count)) { | ||
59 | rcu_read_unlock(); | ||
60 | return 0; | ||
61 | } | ||
62 | rcu_read_unlock(); | ||
56 | /* | 63 | /* |
57 | * From sam4r14, section 5.14 Unit attention condition: | 64 | * From sam4r14, section 5.14 Unit attention condition: |
58 | * | 65 | * |
@@ -79,18 +86,11 @@ target_scsi3_ua_check(struct se_cmd *cmd) | |||
79 | } | 86 | } |
80 | 87 | ||
81 | int core_scsi3_ua_allocate( | 88 | int core_scsi3_ua_allocate( |
82 | struct se_node_acl *nacl, | 89 | struct se_dev_entry *deve, |
83 | u32 unpacked_lun, | ||
84 | u8 asc, | 90 | u8 asc, |
85 | u8 ascq) | 91 | u8 ascq) |
86 | { | 92 | { |
87 | struct se_dev_entry *deve; | ||
88 | struct se_ua *ua, *ua_p, *ua_tmp; | 93 | struct se_ua *ua, *ua_p, *ua_tmp; |
89 | /* | ||
90 | * PASSTHROUGH OPS | ||
91 | */ | ||
92 | if (!nacl) | ||
93 | return -EINVAL; | ||
94 | 94 | ||
95 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); | 95 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); |
96 | if (!ua) { | 96 | if (!ua) { |
@@ -99,13 +99,9 @@ int core_scsi3_ua_allocate( | |||
99 | } | 99 | } |
100 | INIT_LIST_HEAD(&ua->ua_nacl_list); | 100 | INIT_LIST_HEAD(&ua->ua_nacl_list); |
101 | 101 | ||
102 | ua->ua_nacl = nacl; | ||
103 | ua->ua_asc = asc; | 102 | ua->ua_asc = asc; |
104 | ua->ua_ascq = ascq; | 103 | ua->ua_ascq = ascq; |
105 | 104 | ||
106 | spin_lock_irq(&nacl->device_list_lock); | ||
107 | deve = nacl->device_list[unpacked_lun]; | ||
108 | |||
109 | spin_lock(&deve->ua_lock); | 105 | spin_lock(&deve->ua_lock); |
110 | list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { | 106 | list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { |
111 | /* | 107 | /* |
@@ -113,7 +109,6 @@ int core_scsi3_ua_allocate( | |||
113 | */ | 109 | */ |
114 | if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) { | 110 | if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) { |
115 | spin_unlock(&deve->ua_lock); | 111 | spin_unlock(&deve->ua_lock); |
116 | spin_unlock_irq(&nacl->device_list_lock); | ||
117 | kmem_cache_free(se_ua_cache, ua); | 112 | kmem_cache_free(se_ua_cache, ua); |
118 | return 0; | 113 | return 0; |
119 | } | 114 | } |
@@ -158,24 +153,40 @@ int core_scsi3_ua_allocate( | |||
158 | list_add_tail(&ua->ua_nacl_list, | 153 | list_add_tail(&ua->ua_nacl_list, |
159 | &deve->ua_list); | 154 | &deve->ua_list); |
160 | spin_unlock(&deve->ua_lock); | 155 | spin_unlock(&deve->ua_lock); |
161 | spin_unlock_irq(&nacl->device_list_lock); | ||
162 | 156 | ||
163 | atomic_inc_mb(&deve->ua_count); | 157 | atomic_inc_mb(&deve->ua_count); |
164 | return 0; | 158 | return 0; |
165 | } | 159 | } |
166 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); | 160 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); |
167 | spin_unlock(&deve->ua_lock); | 161 | spin_unlock(&deve->ua_lock); |
168 | spin_unlock_irq(&nacl->device_list_lock); | ||
169 | 162 | ||
170 | pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" | 163 | pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:" |
171 | " 0x%02x, ASCQ: 0x%02x\n", | 164 | " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun, |
172 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, | ||
173 | asc, ascq); | 165 | asc, ascq); |
174 | 166 | ||
175 | atomic_inc_mb(&deve->ua_count); | 167 | atomic_inc_mb(&deve->ua_count); |
176 | return 0; | 168 | return 0; |
177 | } | 169 | } |
178 | 170 | ||
171 | void target_ua_allocate_lun(struct se_node_acl *nacl, | ||
172 | u32 unpacked_lun, u8 asc, u8 ascq) | ||
173 | { | ||
174 | struct se_dev_entry *deve; | ||
175 | |||
176 | if (!nacl) | ||
177 | return; | ||
178 | |||
179 | rcu_read_lock(); | ||
180 | deve = target_nacl_find_deve(nacl, unpacked_lun); | ||
181 | if (!deve) { | ||
182 | rcu_read_unlock(); | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | core_scsi3_ua_allocate(deve, asc, ascq); | ||
187 | rcu_read_unlock(); | ||
188 | } | ||
189 | |||
179 | void core_scsi3_ua_release_all( | 190 | void core_scsi3_ua_release_all( |
180 | struct se_dev_entry *deve) | 191 | struct se_dev_entry *deve) |
181 | { | 192 | { |
@@ -210,10 +221,14 @@ void core_scsi3_ua_for_check_condition( | |||
210 | if (!nacl) | 221 | if (!nacl) |
211 | return; | 222 | return; |
212 | 223 | ||
213 | spin_lock_irq(&nacl->device_list_lock); | 224 | rcu_read_lock(); |
214 | deve = nacl->device_list[cmd->orig_fe_lun]; | 225 | deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); |
226 | if (!deve) { | ||
227 | rcu_read_unlock(); | ||
228 | return; | ||
229 | } | ||
215 | if (!atomic_read(&deve->ua_count)) { | 230 | if (!atomic_read(&deve->ua_count)) { |
216 | spin_unlock_irq(&nacl->device_list_lock); | 231 | rcu_read_unlock(); |
217 | return; | 232 | return; |
218 | } | 233 | } |
219 | /* | 234 | /* |
@@ -249,10 +264,10 @@ void core_scsi3_ua_for_check_condition( | |||
249 | atomic_dec_mb(&deve->ua_count); | 264 | atomic_dec_mb(&deve->ua_count); |
250 | } | 265 | } |
251 | spin_unlock(&deve->ua_lock); | 266 | spin_unlock(&deve->ua_lock); |
252 | spin_unlock_irq(&nacl->device_list_lock); | 267 | rcu_read_unlock(); |
253 | 268 | ||
254 | pr_debug("[%s]: %s UNIT ATTENTION condition with" | 269 | pr_debug("[%s]: %s UNIT ATTENTION condition with" |
255 | " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" | 270 | " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x" |
256 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", | 271 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", |
257 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), | 272 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
258 | (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : | 273 | (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : |
@@ -278,10 +293,14 @@ int core_scsi3_ua_clear_for_request_sense( | |||
278 | if (!nacl) | 293 | if (!nacl) |
279 | return -EINVAL; | 294 | return -EINVAL; |
280 | 295 | ||
281 | spin_lock_irq(&nacl->device_list_lock); | 296 | rcu_read_lock(); |
282 | deve = nacl->device_list[cmd->orig_fe_lun]; | 297 | deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); |
298 | if (!deve) { | ||
299 | rcu_read_unlock(); | ||
300 | return -EINVAL; | ||
301 | } | ||
283 | if (!atomic_read(&deve->ua_count)) { | 302 | if (!atomic_read(&deve->ua_count)) { |
284 | spin_unlock_irq(&nacl->device_list_lock); | 303 | rcu_read_unlock(); |
285 | return -EPERM; | 304 | return -EPERM; |
286 | } | 305 | } |
287 | /* | 306 | /* |
@@ -307,10 +326,10 @@ int core_scsi3_ua_clear_for_request_sense( | |||
307 | atomic_dec_mb(&deve->ua_count); | 326 | atomic_dec_mb(&deve->ua_count); |
308 | } | 327 | } |
309 | spin_unlock(&deve->ua_lock); | 328 | spin_unlock(&deve->ua_lock); |
310 | spin_unlock_irq(&nacl->device_list_lock); | 329 | rcu_read_unlock(); |
311 | 330 | ||
312 | pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" | 331 | pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" |
313 | " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," | 332 | " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x," |
314 | " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), | 333 | " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
315 | cmd->orig_fe_lun, *asc, *ascq); | 334 | cmd->orig_fe_lun, *asc, *ascq); |
316 | 335 | ||
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index a6b56b364e7a..bd6e78ba153d 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h | |||
@@ -25,10 +25,14 @@ | |||
25 | 25 | ||
26 | #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 | 26 | #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 |
27 | 27 | ||
28 | #define ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED 0x03 | ||
29 | #define ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED 0x0E | ||
30 | |||
28 | extern struct kmem_cache *se_ua_cache; | 31 | extern struct kmem_cache *se_ua_cache; |
29 | 32 | ||
30 | extern sense_reason_t target_scsi3_ua_check(struct se_cmd *); | 33 | extern sense_reason_t target_scsi3_ua_check(struct se_cmd *); |
31 | extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8); | 34 | extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8); |
35 | extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8); | ||
32 | extern void core_scsi3_ua_release_all(struct se_dev_entry *); | 36 | extern void core_scsi3_ua_release_all(struct se_dev_entry *); |
33 | extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *); | 37 | extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *); |
34 | extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, | 38 | extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 549af9847c28..c448ef421ce7 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2013 Shaohua Li <shli@kernel.org> | 2 | * Copyright (C) 2013 Shaohua Li <shli@kernel.org> |
3 | * Copyright (C) 2014 Red Hat, Inc. | 3 | * Copyright (C) 2014 Red Hat, Inc. |
4 | * Copyright (C) 2015 Arrikto, Inc. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -30,7 +31,6 @@ | |||
30 | #include <target/target_core_base.h> | 31 | #include <target/target_core_base.h> |
31 | #include <target/target_core_fabric.h> | 32 | #include <target/target_core_fabric.h> |
32 | #include <target/target_core_backend.h> | 33 | #include <target/target_core_backend.h> |
33 | #include <target/target_core_backend_configfs.h> | ||
34 | 34 | ||
35 | #include <linux/target_core_user.h> | 35 | #include <linux/target_core_user.h> |
36 | 36 | ||
@@ -168,6 +168,11 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) | |||
168 | tcmu_cmd->tcmu_dev = udev; | 168 | tcmu_cmd->tcmu_dev = udev; |
169 | tcmu_cmd->data_length = se_cmd->data_length; | 169 | tcmu_cmd->data_length = se_cmd->data_length; |
170 | 170 | ||
171 | if (se_cmd->se_cmd_flags & SCF_BIDI) { | ||
172 | BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); | ||
173 | tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length; | ||
174 | } | ||
175 | |||
171 | tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); | 176 | tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); |
172 | 177 | ||
173 | idr_preload(GFP_KERNEL); | 178 | idr_preload(GFP_KERNEL); |
@@ -226,9 +231,106 @@ static inline size_t head_to_end(size_t head, size_t size) | |||
226 | 231 | ||
227 | #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) | 232 | #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) |
228 | 233 | ||
234 | static void alloc_and_scatter_data_area(struct tcmu_dev *udev, | ||
235 | struct scatterlist *data_sg, unsigned int data_nents, | ||
236 | struct iovec **iov, int *iov_cnt, bool copy_data) | ||
237 | { | ||
238 | int i; | ||
239 | void *from, *to; | ||
240 | size_t copy_bytes; | ||
241 | struct scatterlist *sg; | ||
242 | |||
243 | for_each_sg(data_sg, sg, data_nents, i) { | ||
244 | copy_bytes = min_t(size_t, sg->length, | ||
245 | head_to_end(udev->data_head, udev->data_size)); | ||
246 | from = kmap_atomic(sg_page(sg)) + sg->offset; | ||
247 | to = (void *) udev->mb_addr + udev->data_off + udev->data_head; | ||
248 | |||
249 | if (copy_data) { | ||
250 | memcpy(to, from, copy_bytes); | ||
251 | tcmu_flush_dcache_range(to, copy_bytes); | ||
252 | } | ||
253 | |||
254 | /* Even iov_base is relative to mb_addr */ | ||
255 | (*iov)->iov_len = copy_bytes; | ||
256 | (*iov)->iov_base = (void __user *) udev->data_off + | ||
257 | udev->data_head; | ||
258 | (*iov_cnt)++; | ||
259 | (*iov)++; | ||
260 | |||
261 | UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); | ||
262 | |||
263 | /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */ | ||
264 | if (sg->length != copy_bytes) { | ||
265 | void *from_skip = from + copy_bytes; | ||
266 | |||
267 | copy_bytes = sg->length - copy_bytes; | ||
268 | |||
269 | (*iov)->iov_len = copy_bytes; | ||
270 | (*iov)->iov_base = (void __user *) udev->data_off + | ||
271 | udev->data_head; | ||
272 | |||
273 | if (copy_data) { | ||
274 | to = (void *) udev->mb_addr + | ||
275 | udev->data_off + udev->data_head; | ||
276 | memcpy(to, from_skip, copy_bytes); | ||
277 | tcmu_flush_dcache_range(to, copy_bytes); | ||
278 | } | ||
279 | |||
280 | (*iov_cnt)++; | ||
281 | (*iov)++; | ||
282 | |||
283 | UPDATE_HEAD(udev->data_head, | ||
284 | copy_bytes, udev->data_size); | ||
285 | } | ||
286 | |||
287 | kunmap_atomic(from - sg->offset); | ||
288 | } | ||
289 | } | ||
290 | |||
291 | static void gather_and_free_data_area(struct tcmu_dev *udev, | ||
292 | struct scatterlist *data_sg, unsigned int data_nents) | ||
293 | { | ||
294 | int i; | ||
295 | void *from, *to; | ||
296 | size_t copy_bytes; | ||
297 | struct scatterlist *sg; | ||
298 | |||
299 | /* It'd be easier to look at entry's iovec again, but UAM */ | ||
300 | for_each_sg(data_sg, sg, data_nents, i) { | ||
301 | copy_bytes = min_t(size_t, sg->length, | ||
302 | head_to_end(udev->data_tail, udev->data_size)); | ||
303 | |||
304 | to = kmap_atomic(sg_page(sg)) + sg->offset; | ||
305 | WARN_ON(sg->length + sg->offset > PAGE_SIZE); | ||
306 | from = (void *) udev->mb_addr + | ||
307 | udev->data_off + udev->data_tail; | ||
308 | tcmu_flush_dcache_range(from, copy_bytes); | ||
309 | memcpy(to, from, copy_bytes); | ||
310 | |||
311 | UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); | ||
312 | |||
313 | /* Uh oh, wrapped the data buffer for this sg's data */ | ||
314 | if (sg->length != copy_bytes) { | ||
315 | void *to_skip = to + copy_bytes; | ||
316 | |||
317 | from = (void *) udev->mb_addr + | ||
318 | udev->data_off + udev->data_tail; | ||
319 | WARN_ON(udev->data_tail); | ||
320 | copy_bytes = sg->length - copy_bytes; | ||
321 | tcmu_flush_dcache_range(from, copy_bytes); | ||
322 | memcpy(to_skip, from, copy_bytes); | ||
323 | |||
324 | UPDATE_HEAD(udev->data_tail, | ||
325 | copy_bytes, udev->data_size); | ||
326 | } | ||
327 | kunmap_atomic(to - sg->offset); | ||
328 | } | ||
329 | } | ||
330 | |||
229 | /* | 331 | /* |
230 | * We can't queue a command until we have space available on the cmd ring *and* space | 332 | * We can't queue a command until we have space available on the cmd ring *and* |
231 | * space avail on the data ring. | 333 | * space available on the data ring. |
232 | * | 334 | * |
233 | * Called with ring lock held. | 335 | * Called with ring lock held. |
234 | */ | 336 | */ |
@@ -276,12 +378,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
276 | size_t base_command_size, command_size; | 378 | size_t base_command_size, command_size; |
277 | struct tcmu_mailbox *mb; | 379 | struct tcmu_mailbox *mb; |
278 | struct tcmu_cmd_entry *entry; | 380 | struct tcmu_cmd_entry *entry; |
279 | int i; | ||
280 | struct scatterlist *sg; | ||
281 | struct iovec *iov; | 381 | struct iovec *iov; |
282 | int iov_cnt = 0; | 382 | int iov_cnt; |
283 | uint32_t cmd_head; | 383 | uint32_t cmd_head; |
284 | uint64_t cdb_off; | 384 | uint64_t cdb_off; |
385 | bool copy_to_data_area; | ||
285 | 386 | ||
286 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) | 387 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) |
287 | return -EINVAL; | 388 | return -EINVAL; |
@@ -294,7 +395,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
294 | * b/c size == offsetof one-past-element. | 395 | * b/c size == offsetof one-past-element. |
295 | */ | 396 | */ |
296 | base_command_size = max(offsetof(struct tcmu_cmd_entry, | 397 | base_command_size = max(offsetof(struct tcmu_cmd_entry, |
297 | req.iov[se_cmd->t_data_nents + 2]), | 398 | req.iov[se_cmd->t_bidi_data_nents + |
399 | se_cmd->t_data_nents + 2]), | ||
298 | sizeof(struct tcmu_cmd_entry)); | 400 | sizeof(struct tcmu_cmd_entry)); |
299 | command_size = base_command_size | 401 | command_size = base_command_size |
300 | + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); | 402 | + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); |
@@ -362,53 +464,20 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
362 | * Fix up iovecs, and handle if allocation in data ring wrapped. | 464 | * Fix up iovecs, and handle if allocation in data ring wrapped. |
363 | */ | 465 | */ |
364 | iov = &entry->req.iov[0]; | 466 | iov = &entry->req.iov[0]; |
365 | for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { | 467 | iov_cnt = 0; |
366 | size_t copy_bytes = min((size_t)sg->length, | 468 | copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE |
367 | head_to_end(udev->data_head, udev->data_size)); | 469 | || se_cmd->se_cmd_flags & SCF_BIDI); |
368 | void *from = kmap_atomic(sg_page(sg)) + sg->offset; | 470 | alloc_and_scatter_data_area(udev, se_cmd->t_data_sg, |
369 | void *to = (void *) mb + udev->data_off + udev->data_head; | 471 | se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area); |
370 | |||
371 | if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) { | ||
372 | memcpy(to, from, copy_bytes); | ||
373 | tcmu_flush_dcache_range(to, copy_bytes); | ||
374 | } | ||
375 | |||
376 | /* Even iov_base is relative to mb_addr */ | ||
377 | iov->iov_len = copy_bytes; | ||
378 | iov->iov_base = (void __user *) udev->data_off + | ||
379 | udev->data_head; | ||
380 | iov_cnt++; | ||
381 | iov++; | ||
382 | |||
383 | UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); | ||
384 | |||
385 | /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */ | ||
386 | if (sg->length != copy_bytes) { | ||
387 | from += copy_bytes; | ||
388 | copy_bytes = sg->length - copy_bytes; | ||
389 | |||
390 | iov->iov_len = copy_bytes; | ||
391 | iov->iov_base = (void __user *) udev->data_off + | ||
392 | udev->data_head; | ||
393 | |||
394 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | ||
395 | to = (void *) mb + udev->data_off + udev->data_head; | ||
396 | memcpy(to, from, copy_bytes); | ||
397 | tcmu_flush_dcache_range(to, copy_bytes); | ||
398 | } | ||
399 | |||
400 | iov_cnt++; | ||
401 | iov++; | ||
402 | |||
403 | UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); | ||
404 | } | ||
405 | |||
406 | kunmap_atomic(from); | ||
407 | } | ||
408 | entry->req.iov_cnt = iov_cnt; | 472 | entry->req.iov_cnt = iov_cnt; |
409 | entry->req.iov_bidi_cnt = 0; | ||
410 | entry->req.iov_dif_cnt = 0; | 473 | entry->req.iov_dif_cnt = 0; |
411 | 474 | ||
475 | /* Handle BIDI commands */ | ||
476 | iov_cnt = 0; | ||
477 | alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, | ||
478 | se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); | ||
479 | entry->req.iov_bidi_cnt = iov_cnt; | ||
480 | |||
412 | /* All offsets relative to mb_addr, not start of entry! */ | 481 | /* All offsets relative to mb_addr, not start of entry! */ |
413 | cdb_off = CMDR_OFF + cmd_head + base_command_size; | 482 | cdb_off = CMDR_OFF + cmd_head + base_command_size; |
414 | memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); | 483 | memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); |
@@ -481,47 +550,22 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * | |||
481 | se_cmd->scsi_sense_length); | 550 | se_cmd->scsi_sense_length); |
482 | 551 | ||
483 | UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); | 552 | UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); |
484 | } | 553 | } else if (se_cmd->se_cmd_flags & SCF_BIDI) { |
485 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) { | 554 | /* Discard data_out buffer */ |
486 | struct scatterlist *sg; | 555 | UPDATE_HEAD(udev->data_tail, |
487 | int i; | 556 | (size_t)se_cmd->t_data_sg->length, udev->data_size); |
488 | 557 | ||
489 | /* It'd be easier to look at entry's iovec again, but UAM */ | 558 | /* Get Data-In buffer */ |
490 | for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { | 559 | gather_and_free_data_area(udev, |
491 | size_t copy_bytes; | 560 | se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents); |
492 | void *to; | 561 | } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { |
493 | void *from; | 562 | gather_and_free_data_area(udev, |
494 | 563 | se_cmd->t_data_sg, se_cmd->t_data_nents); | |
495 | copy_bytes = min((size_t)sg->length, | ||
496 | head_to_end(udev->data_tail, udev->data_size)); | ||
497 | |||
498 | to = kmap_atomic(sg_page(sg)) + sg->offset; | ||
499 | WARN_ON(sg->length + sg->offset > PAGE_SIZE); | ||
500 | from = (void *) udev->mb_addr + udev->data_off + udev->data_tail; | ||
501 | tcmu_flush_dcache_range(from, copy_bytes); | ||
502 | memcpy(to, from, copy_bytes); | ||
503 | |||
504 | UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); | ||
505 | |||
506 | /* Uh oh, wrapped the data buffer for this sg's data */ | ||
507 | if (sg->length != copy_bytes) { | ||
508 | from = (void *) udev->mb_addr + udev->data_off + udev->data_tail; | ||
509 | WARN_ON(udev->data_tail); | ||
510 | to += copy_bytes; | ||
511 | copy_bytes = sg->length - copy_bytes; | ||
512 | tcmu_flush_dcache_range(from, copy_bytes); | ||
513 | memcpy(to, from, copy_bytes); | ||
514 | |||
515 | UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); | ||
516 | } | ||
517 | |||
518 | kunmap_atomic(to); | ||
519 | } | ||
520 | |||
521 | } else if (se_cmd->data_direction == DMA_TO_DEVICE) { | 564 | } else if (se_cmd->data_direction == DMA_TO_DEVICE) { |
522 | UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); | 565 | UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); |
523 | } else { | 566 | } else if (se_cmd->data_direction != DMA_NONE) { |
524 | pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction); | 567 | pr_warn("TCMU: data direction was %d!\n", |
568 | se_cmd->data_direction); | ||
525 | } | 569 | } |
526 | 570 | ||
527 | target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); | 571 | target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); |
@@ -910,6 +954,14 @@ static int tcmu_check_pending_cmd(int id, void *p, void *data) | |||
910 | return -EINVAL; | 954 | return -EINVAL; |
911 | } | 955 | } |
912 | 956 | ||
957 | static void tcmu_dev_call_rcu(struct rcu_head *p) | ||
958 | { | ||
959 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
960 | struct tcmu_dev *udev = TCMU_DEV(dev); | ||
961 | |||
962 | kfree(udev); | ||
963 | } | ||
964 | |||
913 | static void tcmu_free_device(struct se_device *dev) | 965 | static void tcmu_free_device(struct se_device *dev) |
914 | { | 966 | { |
915 | struct tcmu_dev *udev = TCMU_DEV(dev); | 967 | struct tcmu_dev *udev = TCMU_DEV(dev); |
@@ -935,8 +987,7 @@ static void tcmu_free_device(struct se_device *dev) | |||
935 | kfree(udev->uio_info.name); | 987 | kfree(udev->uio_info.name); |
936 | kfree(udev->name); | 988 | kfree(udev->name); |
937 | } | 989 | } |
938 | 990 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | |
939 | kfree(udev); | ||
940 | } | 991 | } |
941 | 992 | ||
942 | enum { | 993 | enum { |
@@ -1054,27 +1105,7 @@ tcmu_parse_cdb(struct se_cmd *cmd) | |||
1054 | return passthrough_parse_cdb(cmd, tcmu_pass_op); | 1105 | return passthrough_parse_cdb(cmd, tcmu_pass_op); |
1055 | } | 1106 | } |
1056 | 1107 | ||
1057 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); | 1108 | static const struct target_backend_ops tcmu_ops = { |
1058 | TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); | ||
1059 | |||
1060 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); | ||
1061 | TB_DEV_ATTR_RO(tcmu, hw_block_size); | ||
1062 | |||
1063 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); | ||
1064 | TB_DEV_ATTR_RO(tcmu, hw_max_sectors); | ||
1065 | |||
1066 | DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); | ||
1067 | TB_DEV_ATTR_RO(tcmu, hw_queue_depth); | ||
1068 | |||
1069 | static struct configfs_attribute *tcmu_backend_dev_attrs[] = { | ||
1070 | &tcmu_dev_attrib_hw_pi_prot_type.attr, | ||
1071 | &tcmu_dev_attrib_hw_block_size.attr, | ||
1072 | &tcmu_dev_attrib_hw_max_sectors.attr, | ||
1073 | &tcmu_dev_attrib_hw_queue_depth.attr, | ||
1074 | NULL, | ||
1075 | }; | ||
1076 | |||
1077 | static struct se_subsystem_api tcmu_template = { | ||
1078 | .name = "user", | 1109 | .name = "user", |
1079 | .inquiry_prod = "USER", | 1110 | .inquiry_prod = "USER", |
1080 | .inquiry_rev = TCMU_VERSION, | 1111 | .inquiry_rev = TCMU_VERSION, |
@@ -1090,11 +1121,11 @@ static struct se_subsystem_api tcmu_template = { | |||
1090 | .show_configfs_dev_params = tcmu_show_configfs_dev_params, | 1121 | .show_configfs_dev_params = tcmu_show_configfs_dev_params, |
1091 | .get_device_type = sbc_get_device_type, | 1122 | .get_device_type = sbc_get_device_type, |
1092 | .get_blocks = tcmu_get_blocks, | 1123 | .get_blocks = tcmu_get_blocks, |
1124 | .tb_dev_attrib_attrs = passthrough_attrib_attrs, | ||
1093 | }; | 1125 | }; |
1094 | 1126 | ||
1095 | static int __init tcmu_module_init(void) | 1127 | static int __init tcmu_module_init(void) |
1096 | { | 1128 | { |
1097 | struct target_backend_cits *tbc = &tcmu_template.tb_cits; | ||
1098 | int ret; | 1129 | int ret; |
1099 | 1130 | ||
1100 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); | 1131 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); |
@@ -1117,10 +1148,7 @@ static int __init tcmu_module_init(void) | |||
1117 | goto out_unreg_device; | 1148 | goto out_unreg_device; |
1118 | } | 1149 | } |
1119 | 1150 | ||
1120 | target_core_setup_sub_cits(&tcmu_template); | 1151 | ret = transport_backend_register(&tcmu_ops); |
1121 | tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs; | ||
1122 | |||
1123 | ret = transport_subsystem_register(&tcmu_template); | ||
1124 | if (ret) | 1152 | if (ret) |
1125 | goto out_unreg_genl; | 1153 | goto out_unreg_genl; |
1126 | 1154 | ||
@@ -1138,7 +1166,7 @@ out_free_cache: | |||
1138 | 1166 | ||
1139 | static void __exit tcmu_module_exit(void) | 1167 | static void __exit tcmu_module_exit(void) |
1140 | { | 1168 | { |
1141 | transport_subsystem_release(&tcmu_template); | 1169 | target_backend_unregister(&tcmu_ops); |
1142 | genl_unregister_family(&tcmu_genl_family); | 1170 | genl_unregister_family(&tcmu_genl_family); |
1143 | root_device_unregister(tcmu_root_device); | 1171 | root_device_unregister(tcmu_root_device); |
1144 | kmem_cache_destroy(tcmu_cmd_cache); | 1172 | kmem_cache_destroy(tcmu_cmd_cache); |
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 5ec0d00edaa3..4515f52546f8 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <target/target_core_base.h> | 31 | #include <target/target_core_base.h> |
32 | #include <target/target_core_backend.h> | 32 | #include <target/target_core_backend.h> |
33 | #include <target/target_core_fabric.h> | 33 | #include <target/target_core_fabric.h> |
34 | #include <target/target_core_configfs.h> | ||
35 | 34 | ||
36 | #include "target_core_internal.h" | 35 | #include "target_core_internal.h" |
37 | #include "target_core_pr.h" | 36 | #include "target_core_pr.h" |
@@ -348,8 +347,7 @@ struct xcopy_pt_cmd { | |||
348 | unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; | 347 | unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; |
349 | }; | 348 | }; |
350 | 349 | ||
351 | static struct se_port xcopy_pt_port; | 350 | struct se_portal_group xcopy_pt_tpg; |
352 | static struct se_portal_group xcopy_pt_tpg; | ||
353 | static struct se_session xcopy_pt_sess; | 351 | static struct se_session xcopy_pt_sess; |
354 | static struct se_node_acl xcopy_pt_nacl; | 352 | static struct se_node_acl xcopy_pt_nacl; |
355 | 353 | ||
@@ -358,11 +356,6 @@ static char *xcopy_pt_get_fabric_name(void) | |||
358 | return "xcopy-pt"; | 356 | return "xcopy-pt"; |
359 | } | 357 | } |
360 | 358 | ||
361 | static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd) | ||
362 | { | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) | 359 | static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) |
367 | { | 360 | { |
368 | return 0; | 361 | return 0; |
@@ -423,7 +416,6 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd) | |||
423 | 416 | ||
424 | static const struct target_core_fabric_ops xcopy_pt_tfo = { | 417 | static const struct target_core_fabric_ops xcopy_pt_tfo = { |
425 | .get_fabric_name = xcopy_pt_get_fabric_name, | 418 | .get_fabric_name = xcopy_pt_get_fabric_name, |
426 | .get_task_tag = xcopy_pt_get_tag, | ||
427 | .get_cmd_state = xcopy_pt_get_cmd_state, | 419 | .get_cmd_state = xcopy_pt_get_cmd_state, |
428 | .release_cmd = xcopy_pt_release_cmd, | 420 | .release_cmd = xcopy_pt_release_cmd, |
429 | .check_stop_free = xcopy_pt_check_stop_free, | 421 | .check_stop_free = xcopy_pt_check_stop_free, |
@@ -445,17 +437,11 @@ int target_xcopy_setup_pt(void) | |||
445 | return -ENOMEM; | 437 | return -ENOMEM; |
446 | } | 438 | } |
447 | 439 | ||
448 | memset(&xcopy_pt_port, 0, sizeof(struct se_port)); | ||
449 | INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list); | ||
450 | INIT_LIST_HEAD(&xcopy_pt_port.sep_list); | ||
451 | mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex); | ||
452 | |||
453 | memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group)); | 440 | memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group)); |
454 | INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node); | 441 | INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node); |
455 | INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list); | 442 | INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list); |
456 | INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list); | 443 | INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list); |
457 | 444 | ||
458 | xcopy_pt_port.sep_tpg = &xcopy_pt_tpg; | ||
459 | xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo; | 445 | xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo; |
460 | 446 | ||
461 | memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl)); | 447 | memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl)); |
@@ -496,10 +482,6 @@ static void target_xcopy_setup_pt_port( | |||
496 | */ | 482 | */ |
497 | if (remote_port) { | 483 | if (remote_port) { |
498 | xpt_cmd->remote_port = remote_port; | 484 | xpt_cmd->remote_port = remote_port; |
499 | pt_cmd->se_lun->lun_sep = &xcopy_pt_port; | ||
500 | pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to" | ||
501 | " cmd->se_lun->lun_sep for X-COPY data PUSH\n", | ||
502 | pt_cmd->se_lun->lun_sep); | ||
503 | } else { | 485 | } else { |
504 | pt_cmd->se_lun = ec_cmd->se_lun; | 486 | pt_cmd->se_lun = ec_cmd->se_lun; |
505 | pt_cmd->se_dev = ec_cmd->se_dev; | 487 | pt_cmd->se_dev = ec_cmd->se_dev; |
@@ -519,10 +501,6 @@ static void target_xcopy_setup_pt_port( | |||
519 | */ | 501 | */ |
520 | if (remote_port) { | 502 | if (remote_port) { |
521 | xpt_cmd->remote_port = remote_port; | 503 | xpt_cmd->remote_port = remote_port; |
522 | pt_cmd->se_lun->lun_sep = &xcopy_pt_port; | ||
523 | pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to" | ||
524 | " cmd->se_lun->lun_sep for X-COPY data PULL\n", | ||
525 | pt_cmd->se_lun->lun_sep); | ||
526 | } else { | 504 | } else { |
527 | pt_cmd->se_lun = ec_cmd->se_lun; | 505 | pt_cmd->se_lun = ec_cmd->se_lun; |
528 | pt_cmd->se_dev = ec_cmd->se_dev; | 506 | pt_cmd->se_dev = ec_cmd->se_dev; |
@@ -574,6 +552,7 @@ static int target_xcopy_setup_pt_cmd( | |||
574 | xpt_cmd->xcopy_op = xop; | 552 | xpt_cmd->xcopy_op = xop; |
575 | target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); | 553 | target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); |
576 | 554 | ||
555 | cmd->tag = 0; | ||
577 | sense_rc = target_setup_cmd_from_cdb(cmd, cdb); | 556 | sense_rc = target_setup_cmd_from_cdb(cmd, cdb); |
578 | if (sense_rc) { | 557 | if (sense_rc) { |
579 | ret = -EINVAL; | 558 | ret = -EINVAL; |
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 881deb3d499a..39909dadef3e 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -80,8 +80,8 @@ struct ft_node_auth { | |||
80 | * Node ACL for FC remote port session. | 80 | * Node ACL for FC remote port session. |
81 | */ | 81 | */ |
82 | struct ft_node_acl { | 82 | struct ft_node_acl { |
83 | struct ft_node_auth node_auth; | ||
84 | struct se_node_acl se_node_acl; | 83 | struct se_node_acl se_node_acl; |
84 | struct ft_node_auth node_auth; | ||
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct ft_lun { | 87 | struct ft_lun { |
@@ -157,7 +157,6 @@ int ft_queue_status(struct se_cmd *); | |||
157 | int ft_queue_data_in(struct se_cmd *); | 157 | int ft_queue_data_in(struct se_cmd *); |
158 | int ft_write_pending(struct se_cmd *); | 158 | int ft_write_pending(struct se_cmd *); |
159 | int ft_write_pending_status(struct se_cmd *); | 159 | int ft_write_pending_status(struct se_cmd *); |
160 | u32 ft_get_task_tag(struct se_cmd *); | ||
161 | int ft_get_cmd_state(struct se_cmd *); | 160 | int ft_get_cmd_state(struct se_cmd *); |
162 | void ft_queue_tm_resp(struct se_cmd *); | 161 | void ft_queue_tm_resp(struct se_cmd *); |
163 | void ft_aborted_task(struct se_cmd *); | 162 | void ft_aborted_task(struct se_cmd *); |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 1bf78e7c994c..68031723e5be 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -36,7 +36,6 @@ | |||
36 | 36 | ||
37 | #include <target/target_core_base.h> | 37 | #include <target/target_core_base.h> |
38 | #include <target/target_core_fabric.h> | 38 | #include <target/target_core_fabric.h> |
39 | #include <target/target_core_configfs.h> | ||
40 | #include <target/configfs_macros.h> | 39 | #include <target/configfs_macros.h> |
41 | 40 | ||
42 | #include "tcm_fc.h" | 41 | #include "tcm_fc.h" |
@@ -243,15 +242,6 @@ int ft_write_pending(struct se_cmd *se_cmd) | |||
243 | return 0; | 242 | return 0; |
244 | } | 243 | } |
245 | 244 | ||
246 | u32 ft_get_task_tag(struct se_cmd *se_cmd) | ||
247 | { | ||
248 | struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); | ||
249 | |||
250 | if (cmd->aborted) | ||
251 | return ~0; | ||
252 | return fc_seq_exch(cmd->seq)->rxid; | ||
253 | } | ||
254 | |||
255 | int ft_get_cmd_state(struct se_cmd *se_cmd) | 245 | int ft_get_cmd_state(struct se_cmd *se_cmd) |
256 | { | 246 | { |
257 | return 0; | 247 | return 0; |
@@ -564,6 +554,7 @@ static void ft_send_work(struct work_struct *work) | |||
564 | } | 554 | } |
565 | 555 | ||
566 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); | 556 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); |
557 | cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid; | ||
567 | /* | 558 | /* |
568 | * Use a single se_cmd->cmd_kref as we expect to release se_cmd | 559 | * Use a single se_cmd->cmd_kref as we expect to release se_cmd |
569 | * directly from ft_check_stop_free callback in response path. | 560 | * directly from ft_check_stop_free callback in response path. |
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 86b699b94c7b..16670933013b 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -39,13 +39,10 @@ | |||
39 | #include <target/target_core_base.h> | 39 | #include <target/target_core_base.h> |
40 | #include <target/target_core_fabric.h> | 40 | #include <target/target_core_fabric.h> |
41 | #include <target/target_core_fabric_configfs.h> | 41 | #include <target/target_core_fabric_configfs.h> |
42 | #include <target/target_core_configfs.h> | ||
43 | #include <target/configfs_macros.h> | 42 | #include <target/configfs_macros.h> |
44 | 43 | ||
45 | #include "tcm_fc.h" | 44 | #include "tcm_fc.h" |
46 | 45 | ||
47 | static const struct target_core_fabric_ops ft_fabric_ops; | ||
48 | |||
49 | static LIST_HEAD(ft_wwn_list); | 46 | static LIST_HEAD(ft_wwn_list); |
50 | DEFINE_MUTEX(ft_lport_lock); | 47 | DEFINE_MUTEX(ft_lport_lock); |
51 | 48 | ||
@@ -194,48 +191,17 @@ static struct configfs_attribute *ft_nacl_base_attrs[] = { | |||
194 | * Add ACL for an initiator. The ACL is named arbitrarily. | 191 | * Add ACL for an initiator. The ACL is named arbitrarily. |
195 | * The port_name and/or node_name are attributes. | 192 | * The port_name and/or node_name are attributes. |
196 | */ | 193 | */ |
197 | static struct se_node_acl *ft_add_acl( | 194 | static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name) |
198 | struct se_portal_group *se_tpg, | ||
199 | struct config_group *group, | ||
200 | const char *name) | ||
201 | { | 195 | { |
202 | struct ft_node_acl *acl; | 196 | struct ft_node_acl *acl = |
203 | struct ft_tpg *tpg; | 197 | container_of(nacl, struct ft_node_acl, se_node_acl); |
204 | u64 wwpn; | 198 | u64 wwpn; |
205 | u32 q_depth; | ||
206 | |||
207 | pr_debug("add acl %s\n", name); | ||
208 | tpg = container_of(se_tpg, struct ft_tpg, se_tpg); | ||
209 | 199 | ||
210 | if (ft_parse_wwn(name, &wwpn, 1) < 0) | 200 | if (ft_parse_wwn(name, &wwpn, 1) < 0) |
211 | return ERR_PTR(-EINVAL); | 201 | return -EINVAL; |
212 | 202 | ||
213 | acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL); | ||
214 | if (!acl) | ||
215 | return ERR_PTR(-ENOMEM); | ||
216 | acl->node_auth.port_name = wwpn; | 203 | acl->node_auth.port_name = wwpn; |
217 | 204 | return 0; | |
218 | q_depth = 32; /* XXX bogus default - get from tpg? */ | ||
219 | return core_tpg_add_initiator_node_acl(&tpg->se_tpg, | ||
220 | &acl->se_node_acl, name, q_depth); | ||
221 | } | ||
222 | |||
223 | static void ft_del_acl(struct se_node_acl *se_acl) | ||
224 | { | ||
225 | struct se_portal_group *se_tpg = se_acl->se_tpg; | ||
226 | struct ft_tpg *tpg; | ||
227 | struct ft_node_acl *acl = container_of(se_acl, | ||
228 | struct ft_node_acl, se_node_acl); | ||
229 | |||
230 | pr_debug("del acl %s\n", | ||
231 | config_item_name(&se_acl->acl_group.cg_item)); | ||
232 | |||
233 | tpg = container_of(se_tpg, struct ft_tpg, se_tpg); | ||
234 | pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n", | ||
235 | acl, se_acl, tpg, &tpg->se_tpg); | ||
236 | |||
237 | core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); | ||
238 | kfree(acl); | ||
239 | } | 205 | } |
240 | 206 | ||
241 | struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | 207 | struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) |
@@ -245,7 +211,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
245 | struct se_portal_group *se_tpg = &tpg->se_tpg; | 211 | struct se_portal_group *se_tpg = &tpg->se_tpg; |
246 | struct se_node_acl *se_acl; | 212 | struct se_node_acl *se_acl; |
247 | 213 | ||
248 | spin_lock_irq(&se_tpg->acl_node_lock); | 214 | mutex_lock(&se_tpg->acl_node_mutex); |
249 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { | 215 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { |
250 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); | 216 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); |
251 | pr_debug("acl %p port_name %llx\n", | 217 | pr_debug("acl %p port_name %llx\n", |
@@ -259,33 +225,10 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
259 | break; | 225 | break; |
260 | } | 226 | } |
261 | } | 227 | } |
262 | spin_unlock_irq(&se_tpg->acl_node_lock); | 228 | mutex_unlock(&se_tpg->acl_node_mutex); |
263 | return found; | 229 | return found; |
264 | } | 230 | } |
265 | 231 | ||
266 | static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) | ||
267 | { | ||
268 | struct ft_node_acl *acl; | ||
269 | |||
270 | acl = kzalloc(sizeof(*acl), GFP_KERNEL); | ||
271 | if (!acl) { | ||
272 | pr_err("Unable to allocate struct ft_node_acl\n"); | ||
273 | return NULL; | ||
274 | } | ||
275 | pr_debug("acl %p\n", acl); | ||
276 | return &acl->se_node_acl; | ||
277 | } | ||
278 | |||
279 | static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg, | ||
280 | struct se_node_acl *se_acl) | ||
281 | { | ||
282 | struct ft_node_acl *acl = container_of(se_acl, | ||
283 | struct ft_node_acl, se_node_acl); | ||
284 | |||
285 | pr_debug("acl %p\n", acl); | ||
286 | kfree(acl); | ||
287 | } | ||
288 | |||
289 | /* | 232 | /* |
290 | * local_port port_group (tpg) ops. | 233 | * local_port port_group (tpg) ops. |
291 | */ | 234 | */ |
@@ -333,8 +276,7 @@ static struct se_portal_group *ft_add_tpg( | |||
333 | return NULL; | 276 | return NULL; |
334 | } | 277 | } |
335 | 278 | ||
336 | ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg, | 279 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); |
337 | tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
338 | if (ret < 0) { | 280 | if (ret < 0) { |
339 | destroy_workqueue(wq); | 281 | destroy_workqueue(wq); |
340 | kfree(tpg); | 282 | kfree(tpg); |
@@ -459,6 +401,11 @@ static struct configfs_attribute *ft_wwn_attrs[] = { | |||
459 | NULL, | 401 | NULL, |
460 | }; | 402 | }; |
461 | 403 | ||
404 | static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg) | ||
405 | { | ||
406 | return container_of(se_tpg, struct ft_tpg, se_tpg); | ||
407 | } | ||
408 | |||
462 | static char *ft_get_fabric_name(void) | 409 | static char *ft_get_fabric_name(void) |
463 | { | 410 | { |
464 | return "fc"; | 411 | return "fc"; |
@@ -466,25 +413,16 @@ static char *ft_get_fabric_name(void) | |||
466 | 413 | ||
467 | static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) | 414 | static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) |
468 | { | 415 | { |
469 | struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; | 416 | return ft_tpg(se_tpg)->lport_wwn->name; |
470 | |||
471 | return tpg->lport_wwn->name; | ||
472 | } | 417 | } |
473 | 418 | ||
474 | static u16 ft_get_tag(struct se_portal_group *se_tpg) | 419 | static u16 ft_get_tag(struct se_portal_group *se_tpg) |
475 | { | 420 | { |
476 | struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; | ||
477 | |||
478 | /* | 421 | /* |
479 | * This tag is used when forming SCSI Name identifier in EVPD=1 0x83 | 422 | * This tag is used when forming SCSI Name identifier in EVPD=1 0x83 |
480 | * to represent the SCSI Target Port. | 423 | * to represent the SCSI Target Port. |
481 | */ | 424 | */ |
482 | return tpg->index; | 425 | return ft_tpg(se_tpg)->index; |
483 | } | ||
484 | |||
485 | static u32 ft_get_default_depth(struct se_portal_group *se_tpg) | ||
486 | { | ||
487 | return 1; | ||
488 | } | 426 | } |
489 | 427 | ||
490 | static int ft_check_false(struct se_portal_group *se_tpg) | 428 | static int ft_check_false(struct se_portal_group *se_tpg) |
@@ -498,28 +436,20 @@ static void ft_set_default_node_attr(struct se_node_acl *se_nacl) | |||
498 | 436 | ||
499 | static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) | 437 | static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) |
500 | { | 438 | { |
501 | struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; | 439 | return ft_tpg(se_tpg)->index; |
502 | |||
503 | return tpg->index; | ||
504 | } | 440 | } |
505 | 441 | ||
506 | static const struct target_core_fabric_ops ft_fabric_ops = { | 442 | static const struct target_core_fabric_ops ft_fabric_ops = { |
507 | .module = THIS_MODULE, | 443 | .module = THIS_MODULE, |
508 | .name = "fc", | 444 | .name = "fc", |
445 | .node_acl_size = sizeof(struct ft_node_acl), | ||
509 | .get_fabric_name = ft_get_fabric_name, | 446 | .get_fabric_name = ft_get_fabric_name, |
510 | .get_fabric_proto_ident = fc_get_fabric_proto_ident, | ||
511 | .tpg_get_wwn = ft_get_fabric_wwn, | 447 | .tpg_get_wwn = ft_get_fabric_wwn, |
512 | .tpg_get_tag = ft_get_tag, | 448 | .tpg_get_tag = ft_get_tag, |
513 | .tpg_get_default_depth = ft_get_default_depth, | ||
514 | .tpg_get_pr_transport_id = fc_get_pr_transport_id, | ||
515 | .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len, | ||
516 | .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id, | ||
517 | .tpg_check_demo_mode = ft_check_false, | 449 | .tpg_check_demo_mode = ft_check_false, |
518 | .tpg_check_demo_mode_cache = ft_check_false, | 450 | .tpg_check_demo_mode_cache = ft_check_false, |
519 | .tpg_check_demo_mode_write_protect = ft_check_false, | 451 | .tpg_check_demo_mode_write_protect = ft_check_false, |
520 | .tpg_check_prod_mode_write_protect = ft_check_false, | 452 | .tpg_check_prod_mode_write_protect = ft_check_false, |
521 | .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl, | ||
522 | .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, | ||
523 | .tpg_get_inst_index = ft_tpg_get_inst_index, | 453 | .tpg_get_inst_index = ft_tpg_get_inst_index, |
524 | .check_stop_free = ft_check_stop_free, | 454 | .check_stop_free = ft_check_stop_free, |
525 | .release_cmd = ft_release_cmd, | 455 | .release_cmd = ft_release_cmd, |
@@ -530,7 +460,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = { | |||
530 | .write_pending = ft_write_pending, | 460 | .write_pending = ft_write_pending, |
531 | .write_pending_status = ft_write_pending_status, | 461 | .write_pending_status = ft_write_pending_status, |
532 | .set_default_node_attributes = ft_set_default_node_attr, | 462 | .set_default_node_attributes = ft_set_default_node_attr, |
533 | .get_task_tag = ft_get_task_tag, | ||
534 | .get_cmd_state = ft_get_cmd_state, | 463 | .get_cmd_state = ft_get_cmd_state, |
535 | .queue_data_in = ft_queue_data_in, | 464 | .queue_data_in = ft_queue_data_in, |
536 | .queue_status = ft_queue_status, | 465 | .queue_status = ft_queue_status, |
@@ -544,12 +473,7 @@ static const struct target_core_fabric_ops ft_fabric_ops = { | |||
544 | .fabric_drop_wwn = &ft_del_wwn, | 473 | .fabric_drop_wwn = &ft_del_wwn, |
545 | .fabric_make_tpg = &ft_add_tpg, | 474 | .fabric_make_tpg = &ft_add_tpg, |
546 | .fabric_drop_tpg = &ft_del_tpg, | 475 | .fabric_drop_tpg = &ft_del_tpg, |
547 | .fabric_post_link = NULL, | 476 | .fabric_init_nodeacl = &ft_init_nodeacl, |
548 | .fabric_pre_unlink = NULL, | ||
549 | .fabric_make_np = NULL, | ||
550 | .fabric_drop_np = NULL, | ||
551 | .fabric_make_nodeacl = &ft_add_acl, | ||
552 | .fabric_drop_nodeacl = &ft_del_acl, | ||
553 | 477 | ||
554 | .tfc_wwn_attrs = ft_wwn_attrs, | 478 | .tfc_wwn_attrs = ft_wwn_attrs, |
555 | .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, | 479 | .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, |
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index fe585d1cce23..4b0fedd6bd4b 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -44,7 +44,6 @@ | |||
44 | 44 | ||
45 | #include <target/target_core_base.h> | 45 | #include <target/target_core_base.h> |
46 | #include <target/target_core_fabric.h> | 46 | #include <target/target_core_fabric.h> |
47 | #include <target/target_core_configfs.h> | ||
48 | #include <target/configfs_macros.h> | 47 | #include <target/configfs_macros.h> |
49 | 48 | ||
50 | #include "tcm_fc.h" | 49 | #include "tcm_fc.h" |
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index f2a616d4f2c4..31a9e3fb98c5 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
@@ -36,7 +36,6 @@ | |||
36 | 36 | ||
37 | #include <target/target_core_base.h> | 37 | #include <target/target_core_base.h> |
38 | #include <target/target_core_fabric.h> | 38 | #include <target/target_core_fabric.h> |
39 | #include <target/target_core_configfs.h> | ||
40 | #include <target/configfs_macros.h> | 39 | #include <target/configfs_macros.h> |
41 | 40 | ||
42 | #include "tcm_fc.h" | 41 | #include "tcm_fc.h" |
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 6ce932f90ef8..c3c48088fced 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <target/target_core_base.h> | 20 | #include <target/target_core_base.h> |
21 | #include <target/target_core_fabric.h> | 21 | #include <target/target_core_fabric.h> |
22 | #include <target/target_core_fabric_configfs.h> | 22 | #include <target/target_core_fabric_configfs.h> |
23 | #include <target/target_core_configfs.h> | ||
24 | #include <target/configfs_macros.h> | 23 | #include <target/configfs_macros.h> |
25 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
26 | 25 | ||
@@ -28,8 +27,6 @@ | |||
28 | 27 | ||
29 | USB_GADGET_COMPOSITE_OPTIONS(); | 28 | USB_GADGET_COMPOSITE_OPTIONS(); |
30 | 29 | ||
31 | static const struct target_core_fabric_ops usbg_ops; | ||
32 | |||
33 | static inline struct f_uas *to_f_uas(struct usb_function *f) | 30 | static inline struct f_uas *to_f_uas(struct usb_function *f) |
34 | { | 31 | { |
35 | return container_of(f, struct f_uas, function); | 32 | return container_of(f, struct f_uas, function); |
@@ -1111,6 +1108,7 @@ static int usbg_submit_command(struct f_uas *fu, | |||
1111 | memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len); | 1108 | memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len); |
1112 | 1109 | ||
1113 | cmd->tag = be16_to_cpup(&cmd_iu->tag); | 1110 | cmd->tag = be16_to_cpup(&cmd_iu->tag); |
1111 | cmd->se_cmd.tag = cmd->tag; | ||
1114 | if (fu->flags & USBG_USE_STREAMS) { | 1112 | if (fu->flags & USBG_USE_STREAMS) { |
1115 | if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS) | 1113 | if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS) |
1116 | goto err; | 1114 | goto err; |
@@ -1244,6 +1242,7 @@ static int bot_submit_command(struct f_uas *fu, | |||
1244 | cmd->unpacked_lun = cbw->Lun; | 1242 | cmd->unpacked_lun = cbw->Lun; |
1245 | cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0; | 1243 | cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0; |
1246 | cmd->data_len = le32_to_cpu(cbw->DataTransferLength); | 1244 | cmd->data_len = le32_to_cpu(cbw->DataTransferLength); |
1245 | cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag); | ||
1247 | 1246 | ||
1248 | INIT_WORK(&cmd->work, bot_cmd_work); | 1247 | INIT_WORK(&cmd->work, bot_cmd_work); |
1249 | ret = queue_work(tpg->workqueue, &cmd->work); | 1248 | ret = queue_work(tpg->workqueue, &cmd->work); |
@@ -1273,23 +1272,6 @@ static char *usbg_get_fabric_name(void) | |||
1273 | return "usb_gadget"; | 1272 | return "usb_gadget"; |
1274 | } | 1273 | } |
1275 | 1274 | ||
1276 | static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
1277 | { | ||
1278 | struct usbg_tpg *tpg = container_of(se_tpg, | ||
1279 | struct usbg_tpg, se_tpg); | ||
1280 | struct usbg_tport *tport = tpg->tport; | ||
1281 | u8 proto_id; | ||
1282 | |||
1283 | switch (tport->tport_proto_id) { | ||
1284 | case SCSI_PROTOCOL_SAS: | ||
1285 | default: | ||
1286 | proto_id = sas_get_fabric_proto_ident(se_tpg); | ||
1287 | break; | ||
1288 | } | ||
1289 | |||
1290 | return proto_id; | ||
1291 | } | ||
1292 | |||
1293 | static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg) | 1275 | static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg) |
1294 | { | 1276 | { |
1295 | struct usbg_tpg *tpg = container_of(se_tpg, | 1277 | struct usbg_tpg *tpg = container_of(se_tpg, |
@@ -1306,97 +1288,6 @@ static u16 usbg_get_tag(struct se_portal_group *se_tpg) | |||
1306 | return tpg->tport_tpgt; | 1288 | return tpg->tport_tpgt; |
1307 | } | 1289 | } |
1308 | 1290 | ||
1309 | static u32 usbg_get_default_depth(struct se_portal_group *se_tpg) | ||
1310 | { | ||
1311 | return 1; | ||
1312 | } | ||
1313 | |||
1314 | static u32 usbg_get_pr_transport_id( | ||
1315 | struct se_portal_group *se_tpg, | ||
1316 | struct se_node_acl *se_nacl, | ||
1317 | struct t10_pr_registration *pr_reg, | ||
1318 | int *format_code, | ||
1319 | unsigned char *buf) | ||
1320 | { | ||
1321 | struct usbg_tpg *tpg = container_of(se_tpg, | ||
1322 | struct usbg_tpg, se_tpg); | ||
1323 | struct usbg_tport *tport = tpg->tport; | ||
1324 | int ret = 0; | ||
1325 | |||
1326 | switch (tport->tport_proto_id) { | ||
1327 | case SCSI_PROTOCOL_SAS: | ||
1328 | default: | ||
1329 | ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
1330 | format_code, buf); | ||
1331 | break; | ||
1332 | } | ||
1333 | |||
1334 | return ret; | ||
1335 | } | ||
1336 | |||
1337 | static u32 usbg_get_pr_transport_id_len( | ||
1338 | struct se_portal_group *se_tpg, | ||
1339 | struct se_node_acl *se_nacl, | ||
1340 | struct t10_pr_registration *pr_reg, | ||
1341 | int *format_code) | ||
1342 | { | ||
1343 | struct usbg_tpg *tpg = container_of(se_tpg, | ||
1344 | struct usbg_tpg, se_tpg); | ||
1345 | struct usbg_tport *tport = tpg->tport; | ||
1346 | int ret = 0; | ||
1347 | |||
1348 | switch (tport->tport_proto_id) { | ||
1349 | case SCSI_PROTOCOL_SAS: | ||
1350 | default: | ||
1351 | ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
1352 | format_code); | ||
1353 | break; | ||
1354 | } | ||
1355 | |||
1356 | return ret; | ||
1357 | } | ||
1358 | |||
1359 | static char *usbg_parse_pr_out_transport_id( | ||
1360 | struct se_portal_group *se_tpg, | ||
1361 | const char *buf, | ||
1362 | u32 *out_tid_len, | ||
1363 | char **port_nexus_ptr) | ||
1364 | { | ||
1365 | struct usbg_tpg *tpg = container_of(se_tpg, | ||
1366 | struct usbg_tpg, se_tpg); | ||
1367 | struct usbg_tport *tport = tpg->tport; | ||
1368 | char *tid = NULL; | ||
1369 | |||
1370 | switch (tport->tport_proto_id) { | ||
1371 | case SCSI_PROTOCOL_SAS: | ||
1372 | default: | ||
1373 | tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
1374 | port_nexus_ptr); | ||
1375 | } | ||
1376 | |||
1377 | return tid; | ||
1378 | } | ||
1379 | |||
1380 | static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg) | ||
1381 | { | ||
1382 | struct usbg_nacl *nacl; | ||
1383 | |||
1384 | nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL); | ||
1385 | if (!nacl) | ||
1386 | return NULL; | ||
1387 | |||
1388 | return &nacl->se_node_acl; | ||
1389 | } | ||
1390 | |||
1391 | static void usbg_release_fabric_acl( | ||
1392 | struct se_portal_group *se_tpg, | ||
1393 | struct se_node_acl *se_nacl) | ||
1394 | { | ||
1395 | struct usbg_nacl *nacl = container_of(se_nacl, | ||
1396 | struct usbg_nacl, se_node_acl); | ||
1397 | kfree(nacl); | ||
1398 | } | ||
1399 | |||
1400 | static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg) | 1291 | static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg) |
1401 | { | 1292 | { |
1402 | return 1; | 1293 | return 1; |
@@ -1447,18 +1338,6 @@ static void usbg_set_default_node_attrs(struct se_node_acl *nacl) | |||
1447 | return; | 1338 | return; |
1448 | } | 1339 | } |
1449 | 1340 | ||
1450 | static u32 usbg_get_task_tag(struct se_cmd *se_cmd) | ||
1451 | { | ||
1452 | struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, | ||
1453 | se_cmd); | ||
1454 | struct f_uas *fu = cmd->fu; | ||
1455 | |||
1456 | if (fu->flags & USBG_IS_BOT) | ||
1457 | return le32_to_cpu(cmd->bot_tag); | ||
1458 | else | ||
1459 | return cmd->tag; | ||
1460 | } | ||
1461 | |||
1462 | static int usbg_get_cmd_state(struct se_cmd *se_cmd) | 1341 | static int usbg_get_cmd_state(struct se_cmd *se_cmd) |
1463 | { | 1342 | { |
1464 | return 0; | 1343 | return 0; |
@@ -1488,50 +1367,11 @@ static const char *usbg_check_wwn(const char *name) | |||
1488 | return n; | 1367 | return n; |
1489 | } | 1368 | } |
1490 | 1369 | ||
1491 | static struct se_node_acl *usbg_make_nodeacl( | 1370 | static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name) |
1492 | struct se_portal_group *se_tpg, | ||
1493 | struct config_group *group, | ||
1494 | const char *name) | ||
1495 | { | ||
1496 | struct se_node_acl *se_nacl, *se_nacl_new; | ||
1497 | struct usbg_nacl *nacl; | ||
1498 | u64 wwpn = 0; | ||
1499 | u32 nexus_depth; | ||
1500 | const char *wnn_name; | ||
1501 | |||
1502 | wnn_name = usbg_check_wwn(name); | ||
1503 | if (!wnn_name) | ||
1504 | return ERR_PTR(-EINVAL); | ||
1505 | se_nacl_new = usbg_alloc_fabric_acl(se_tpg); | ||
1506 | if (!(se_nacl_new)) | ||
1507 | return ERR_PTR(-ENOMEM); | ||
1508 | |||
1509 | nexus_depth = 1; | ||
1510 | /* | ||
1511 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
1512 | * when converting a NodeACL from demo mode -> explict | ||
1513 | */ | ||
1514 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | ||
1515 | name, nexus_depth); | ||
1516 | if (IS_ERR(se_nacl)) { | ||
1517 | usbg_release_fabric_acl(se_tpg, se_nacl_new); | ||
1518 | return se_nacl; | ||
1519 | } | ||
1520 | /* | ||
1521 | * Locate our struct usbg_nacl and set the FC Nport WWPN | ||
1522 | */ | ||
1523 | nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl); | ||
1524 | nacl->iport_wwpn = wwpn; | ||
1525 | snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name); | ||
1526 | return se_nacl; | ||
1527 | } | ||
1528 | |||
1529 | static void usbg_drop_nodeacl(struct se_node_acl *se_acl) | ||
1530 | { | 1371 | { |
1531 | struct usbg_nacl *nacl = container_of(se_acl, | 1372 | if (!usbg_check_wwn(name)) |
1532 | struct usbg_nacl, se_node_acl); | 1373 | return -EINVAL; |
1533 | core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); | 1374 | return 0; |
1534 | kfree(nacl); | ||
1535 | } | 1375 | } |
1536 | 1376 | ||
1537 | struct usbg_tpg *the_only_tpg_I_currently_have; | 1377 | struct usbg_tpg *the_only_tpg_I_currently_have; |
@@ -1571,8 +1411,11 @@ static struct se_portal_group *usbg_make_tpg( | |||
1571 | tpg->tport = tport; | 1411 | tpg->tport = tport; |
1572 | tpg->tport_tpgt = tpgt; | 1412 | tpg->tport_tpgt = tpgt; |
1573 | 1413 | ||
1574 | ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg, | 1414 | /* |
1575 | TRANSPORT_TPG_TYPE_NORMAL); | 1415 | * SPC doesn't assign a protocol identifier for USB-SCSI, so we |
1416 | * pretend to be SAS.. | ||
1417 | */ | ||
1418 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS); | ||
1576 | if (ret < 0) { | 1419 | if (ret < 0) { |
1577 | destroy_workqueue(tpg->workqueue); | 1420 | destroy_workqueue(tpg->workqueue); |
1578 | kfree(tpg); | 1421 | kfree(tpg); |
@@ -1866,19 +1709,12 @@ static const struct target_core_fabric_ops usbg_ops = { | |||
1866 | .module = THIS_MODULE, | 1709 | .module = THIS_MODULE, |
1867 | .name = "usb_gadget", | 1710 | .name = "usb_gadget", |
1868 | .get_fabric_name = usbg_get_fabric_name, | 1711 | .get_fabric_name = usbg_get_fabric_name, |
1869 | .get_fabric_proto_ident = usbg_get_fabric_proto_ident, | ||
1870 | .tpg_get_wwn = usbg_get_fabric_wwn, | 1712 | .tpg_get_wwn = usbg_get_fabric_wwn, |
1871 | .tpg_get_tag = usbg_get_tag, | 1713 | .tpg_get_tag = usbg_get_tag, |
1872 | .tpg_get_default_depth = usbg_get_default_depth, | ||
1873 | .tpg_get_pr_transport_id = usbg_get_pr_transport_id, | ||
1874 | .tpg_get_pr_transport_id_len = usbg_get_pr_transport_id_len, | ||
1875 | .tpg_parse_pr_out_transport_id = usbg_parse_pr_out_transport_id, | ||
1876 | .tpg_check_demo_mode = usbg_check_true, | 1714 | .tpg_check_demo_mode = usbg_check_true, |
1877 | .tpg_check_demo_mode_cache = usbg_check_false, | 1715 | .tpg_check_demo_mode_cache = usbg_check_false, |
1878 | .tpg_check_demo_mode_write_protect = usbg_check_false, | 1716 | .tpg_check_demo_mode_write_protect = usbg_check_false, |
1879 | .tpg_check_prod_mode_write_protect = usbg_check_false, | 1717 | .tpg_check_prod_mode_write_protect = usbg_check_false, |
1880 | .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl, | ||
1881 | .tpg_release_fabric_acl = usbg_release_fabric_acl, | ||
1882 | .tpg_get_inst_index = usbg_tpg_get_inst_index, | 1718 | .tpg_get_inst_index = usbg_tpg_get_inst_index, |
1883 | .release_cmd = usbg_release_cmd, | 1719 | .release_cmd = usbg_release_cmd, |
1884 | .shutdown_session = usbg_shutdown_session, | 1720 | .shutdown_session = usbg_shutdown_session, |
@@ -1888,7 +1724,6 @@ static const struct target_core_fabric_ops usbg_ops = { | |||
1888 | .write_pending = usbg_send_write_request, | 1724 | .write_pending = usbg_send_write_request, |
1889 | .write_pending_status = usbg_write_pending_status, | 1725 | .write_pending_status = usbg_write_pending_status, |
1890 | .set_default_node_attributes = usbg_set_default_node_attrs, | 1726 | .set_default_node_attributes = usbg_set_default_node_attrs, |
1891 | .get_task_tag = usbg_get_task_tag, | ||
1892 | .get_cmd_state = usbg_get_cmd_state, | 1727 | .get_cmd_state = usbg_get_cmd_state, |
1893 | .queue_data_in = usbg_send_read_response, | 1728 | .queue_data_in = usbg_send_read_response, |
1894 | .queue_status = usbg_send_status_response, | 1729 | .queue_status = usbg_send_status_response, |
@@ -1902,10 +1737,7 @@ static const struct target_core_fabric_ops usbg_ops = { | |||
1902 | .fabric_drop_tpg = usbg_drop_tpg, | 1737 | .fabric_drop_tpg = usbg_drop_tpg, |
1903 | .fabric_post_link = usbg_port_link, | 1738 | .fabric_post_link = usbg_port_link, |
1904 | .fabric_pre_unlink = usbg_port_unlink, | 1739 | .fabric_pre_unlink = usbg_port_unlink, |
1905 | .fabric_make_np = NULL, | 1740 | .fabric_init_nodeacl = usbg_init_nodeacl, |
1906 | .fabric_drop_np = NULL, | ||
1907 | .fabric_make_nodeacl = usbg_make_nodeacl, | ||
1908 | .fabric_drop_nodeacl = usbg_drop_nodeacl, | ||
1909 | 1741 | ||
1910 | .tfc_wwn_attrs = usbg_wwn_attrs, | 1742 | .tfc_wwn_attrs = usbg_wwn_attrs, |
1911 | .tfc_tpg_base_attrs = usbg_base_attrs, | 1743 | .tfc_tpg_base_attrs = usbg_base_attrs, |
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.h b/drivers/usb/gadget/legacy/tcm_usb_gadget.h index 9fb3544cc80f..0b749e1aa2f1 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.h +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.h | |||
@@ -24,15 +24,6 @@ enum { | |||
24 | #define USB_G_ALT_INT_BBB 0 | 24 | #define USB_G_ALT_INT_BBB 0 |
25 | #define USB_G_ALT_INT_UAS 1 | 25 | #define USB_G_ALT_INT_UAS 1 |
26 | 26 | ||
27 | struct usbg_nacl { | ||
28 | /* Binary World Wide unique Port Name for SAS Initiator port */ | ||
29 | u64 iport_wwpn; | ||
30 | /* ASCII formatted WWPN for Sas Initiator port */ | ||
31 | char iport_name[USBG_NAMELEN]; | ||
32 | /* Returned by usbg_make_nodeacl() */ | ||
33 | struct se_node_acl se_node_acl; | ||
34 | }; | ||
35 | |||
36 | struct tcm_usbg_nexus { | 27 | struct tcm_usbg_nexus { |
37 | struct se_session *tvn_se_sess; | 28 | struct se_session *tvn_se_sess; |
38 | }; | 29 | }; |
@@ -52,8 +43,6 @@ struct usbg_tpg { | |||
52 | }; | 43 | }; |
53 | 44 | ||
54 | struct usbg_tport { | 45 | struct usbg_tport { |
55 | /* SCSI protocol the tport is providing */ | ||
56 | u8 tport_proto_id; | ||
57 | /* Binary World Wide unique Port Name for SAS Target port */ | 46 | /* Binary World Wide unique Port Name for SAS Target port */ |
58 | u64 tport_wwpn; | 47 | u64 tport_wwpn; |
59 | /* ASCII formatted WWPN for SAS Target port */ | 48 | /* ASCII formatted WWPN for SAS Target port */ |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 55722feeb898..dfcc02c93648 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <target/target_core_base.h> | 43 | #include <target/target_core_base.h> |
44 | #include <target/target_core_fabric.h> | 44 | #include <target/target_core_fabric.h> |
45 | #include <target/target_core_fabric_configfs.h> | 45 | #include <target/target_core_fabric_configfs.h> |
46 | #include <target/target_core_configfs.h> | ||
47 | #include <target/configfs_macros.h> | 46 | #include <target/configfs_macros.h> |
48 | #include <linux/vhost.h> | 47 | #include <linux/vhost.h> |
49 | #include <linux/virtio_scsi.h> | 48 | #include <linux/virtio_scsi.h> |
@@ -117,15 +116,6 @@ struct vhost_scsi_nexus { | |||
117 | struct se_session *tvn_se_sess; | 116 | struct se_session *tvn_se_sess; |
118 | }; | 117 | }; |
119 | 118 | ||
120 | struct vhost_scsi_nacl { | ||
121 | /* Binary World Wide unique Port Name for Vhost Initiator port */ | ||
122 | u64 iport_wwpn; | ||
123 | /* ASCII formatted WWPN for Sas Initiator port */ | ||
124 | char iport_name[VHOST_SCSI_NAMELEN]; | ||
125 | /* Returned by vhost_scsi_make_nodeacl() */ | ||
126 | struct se_node_acl se_node_acl; | ||
127 | }; | ||
128 | |||
129 | struct vhost_scsi_tpg { | 119 | struct vhost_scsi_tpg { |
130 | /* Vhost port target portal group tag for TCM */ | 120 | /* Vhost port target portal group tag for TCM */ |
131 | u16 tport_tpgt; | 121 | u16 tport_tpgt; |
@@ -218,7 +208,6 @@ struct vhost_scsi { | |||
218 | int vs_events_nr; /* num of pending events, protected by vq->mutex */ | 208 | int vs_events_nr; /* num of pending events, protected by vq->mutex */ |
219 | }; | 209 | }; |
220 | 210 | ||
221 | static struct target_core_fabric_ops vhost_scsi_ops; | ||
222 | static struct workqueue_struct *vhost_scsi_workqueue; | 211 | static struct workqueue_struct *vhost_scsi_workqueue; |
223 | 212 | ||
224 | /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ | 213 | /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ |
@@ -299,28 +288,6 @@ static char *vhost_scsi_get_fabric_name(void) | |||
299 | return "vhost"; | 288 | return "vhost"; |
300 | } | 289 | } |
301 | 290 | ||
302 | static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
303 | { | ||
304 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
305 | struct vhost_scsi_tpg, se_tpg); | ||
306 | struct vhost_scsi_tport *tport = tpg->tport; | ||
307 | |||
308 | switch (tport->tport_proto_id) { | ||
309 | case SCSI_PROTOCOL_SAS: | ||
310 | return sas_get_fabric_proto_ident(se_tpg); | ||
311 | case SCSI_PROTOCOL_FCP: | ||
312 | return fc_get_fabric_proto_ident(se_tpg); | ||
313 | case SCSI_PROTOCOL_ISCSI: | ||
314 | return iscsi_get_fabric_proto_ident(se_tpg); | ||
315 | default: | ||
316 | pr_err("Unknown tport_proto_id: 0x%02x, using" | ||
317 | " SAS emulation\n", tport->tport_proto_id); | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | return sas_get_fabric_proto_ident(se_tpg); | ||
322 | } | ||
323 | |||
324 | static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) | 291 | static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) |
325 | { | 292 | { |
326 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | 293 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
@@ -337,102 +304,6 @@ static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg) | |||
337 | return tpg->tport_tpgt; | 304 | return tpg->tport_tpgt; |
338 | } | 305 | } |
339 | 306 | ||
340 | static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg) | ||
341 | { | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | static u32 | ||
346 | vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg, | ||
347 | struct se_node_acl *se_nacl, | ||
348 | struct t10_pr_registration *pr_reg, | ||
349 | int *format_code, | ||
350 | unsigned char *buf) | ||
351 | { | ||
352 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
353 | struct vhost_scsi_tpg, se_tpg); | ||
354 | struct vhost_scsi_tport *tport = tpg->tport; | ||
355 | |||
356 | switch (tport->tport_proto_id) { | ||
357 | case SCSI_PROTOCOL_SAS: | ||
358 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
359 | format_code, buf); | ||
360 | case SCSI_PROTOCOL_FCP: | ||
361 | return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
362 | format_code, buf); | ||
363 | case SCSI_PROTOCOL_ISCSI: | ||
364 | return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
365 | format_code, buf); | ||
366 | default: | ||
367 | pr_err("Unknown tport_proto_id: 0x%02x, using" | ||
368 | " SAS emulation\n", tport->tport_proto_id); | ||
369 | break; | ||
370 | } | ||
371 | |||
372 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
373 | format_code, buf); | ||
374 | } | ||
375 | |||
376 | static u32 | ||
377 | vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg, | ||
378 | struct se_node_acl *se_nacl, | ||
379 | struct t10_pr_registration *pr_reg, | ||
380 | int *format_code) | ||
381 | { | ||
382 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
383 | struct vhost_scsi_tpg, se_tpg); | ||
384 | struct vhost_scsi_tport *tport = tpg->tport; | ||
385 | |||
386 | switch (tport->tport_proto_id) { | ||
387 | case SCSI_PROTOCOL_SAS: | ||
388 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
389 | format_code); | ||
390 | case SCSI_PROTOCOL_FCP: | ||
391 | return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
392 | format_code); | ||
393 | case SCSI_PROTOCOL_ISCSI: | ||
394 | return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
395 | format_code); | ||
396 | default: | ||
397 | pr_err("Unknown tport_proto_id: 0x%02x, using" | ||
398 | " SAS emulation\n", tport->tport_proto_id); | ||
399 | break; | ||
400 | } | ||
401 | |||
402 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
403 | format_code); | ||
404 | } | ||
405 | |||
406 | static char * | ||
407 | vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | ||
408 | const char *buf, | ||
409 | u32 *out_tid_len, | ||
410 | char **port_nexus_ptr) | ||
411 | { | ||
412 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
413 | struct vhost_scsi_tpg, se_tpg); | ||
414 | struct vhost_scsi_tport *tport = tpg->tport; | ||
415 | |||
416 | switch (tport->tport_proto_id) { | ||
417 | case SCSI_PROTOCOL_SAS: | ||
418 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
419 | port_nexus_ptr); | ||
420 | case SCSI_PROTOCOL_FCP: | ||
421 | return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
422 | port_nexus_ptr); | ||
423 | case SCSI_PROTOCOL_ISCSI: | ||
424 | return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
425 | port_nexus_ptr); | ||
426 | default: | ||
427 | pr_err("Unknown tport_proto_id: 0x%02x, using" | ||
428 | " SAS emulation\n", tport->tport_proto_id); | ||
429 | break; | ||
430 | } | ||
431 | |||
432 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
433 | port_nexus_ptr); | ||
434 | } | ||
435 | |||
436 | static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) | 307 | static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) |
437 | { | 308 | { |
438 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | 309 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
@@ -441,29 +312,6 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) | |||
441 | return tpg->tv_fabric_prot_type; | 312 | return tpg->tv_fabric_prot_type; |
442 | } | 313 | } |
443 | 314 | ||
444 | static struct se_node_acl * | ||
445 | vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg) | ||
446 | { | ||
447 | struct vhost_scsi_nacl *nacl; | ||
448 | |||
449 | nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL); | ||
450 | if (!nacl) { | ||
451 | pr_err("Unable to allocate struct vhost_scsi_nacl\n"); | ||
452 | return NULL; | ||
453 | } | ||
454 | |||
455 | return &nacl->se_node_acl; | ||
456 | } | ||
457 | |||
458 | static void | ||
459 | vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg, | ||
460 | struct se_node_acl *se_nacl) | ||
461 | { | ||
462 | struct vhost_scsi_nacl *nacl = container_of(se_nacl, | ||
463 | struct vhost_scsi_nacl, se_node_acl); | ||
464 | kfree(nacl); | ||
465 | } | ||
466 | |||
467 | static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) | 315 | static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) |
468 | { | 316 | { |
469 | return 1; | 317 | return 1; |
@@ -521,11 +369,6 @@ static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl) | |||
521 | return; | 369 | return; |
522 | } | 370 | } |
523 | 371 | ||
524 | static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd) | ||
525 | { | ||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) | 372 | static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) |
530 | { | 373 | { |
531 | return 0; | 374 | return 0; |
@@ -609,7 +452,7 @@ static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd) | |||
609 | 452 | ||
610 | static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) | 453 | static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) |
611 | { | 454 | { |
612 | return target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 455 | return target_put_sess_cmd(se_cmd); |
613 | } | 456 | } |
614 | 457 | ||
615 | static void | 458 | static void |
@@ -970,6 +813,7 @@ static void vhost_scsi_submission_work(struct work_struct *work) | |||
970 | } | 813 | } |
971 | tv_nexus = cmd->tvc_nexus; | 814 | tv_nexus = cmd->tvc_nexus; |
972 | 815 | ||
816 | se_cmd->tag = 0; | ||
973 | rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, | 817 | rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, |
974 | cmd->tvc_cdb, &cmd->tvc_sense_buf[0], | 818 | cmd->tvc_cdb, &cmd->tvc_sense_buf[0], |
975 | cmd->tvc_lun, cmd->tvc_exp_data_len, | 819 | cmd->tvc_lun, cmd->tvc_exp_data_len, |
@@ -1824,50 +1668,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, | |||
1824 | mutex_unlock(&vhost_scsi_mutex); | 1668 | mutex_unlock(&vhost_scsi_mutex); |
1825 | } | 1669 | } |
1826 | 1670 | ||
1827 | static struct se_node_acl * | ||
1828 | vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg, | ||
1829 | struct config_group *group, | ||
1830 | const char *name) | ||
1831 | { | ||
1832 | struct se_node_acl *se_nacl, *se_nacl_new; | ||
1833 | struct vhost_scsi_nacl *nacl; | ||
1834 | u64 wwpn = 0; | ||
1835 | u32 nexus_depth; | ||
1836 | |||
1837 | /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) | ||
1838 | return ERR_PTR(-EINVAL); */ | ||
1839 | se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg); | ||
1840 | if (!se_nacl_new) | ||
1841 | return ERR_PTR(-ENOMEM); | ||
1842 | |||
1843 | nexus_depth = 1; | ||
1844 | /* | ||
1845 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() | ||
1846 | * when converting a NodeACL from demo mode -> explict | ||
1847 | */ | ||
1848 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | ||
1849 | name, nexus_depth); | ||
1850 | if (IS_ERR(se_nacl)) { | ||
1851 | vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new); | ||
1852 | return se_nacl; | ||
1853 | } | ||
1854 | /* | ||
1855 | * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN | ||
1856 | */ | ||
1857 | nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl); | ||
1858 | nacl->iport_wwpn = wwpn; | ||
1859 | |||
1860 | return se_nacl; | ||
1861 | } | ||
1862 | |||
1863 | static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl) | ||
1864 | { | ||
1865 | struct vhost_scsi_nacl *nacl = container_of(se_acl, | ||
1866 | struct vhost_scsi_nacl, se_node_acl); | ||
1867 | core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); | ||
1868 | kfree(nacl); | ||
1869 | } | ||
1870 | |||
1871 | static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, | 1671 | static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, |
1872 | struct se_session *se_sess) | 1672 | struct se_session *se_sess) |
1873 | { | 1673 | { |
@@ -2202,8 +2002,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, | |||
2202 | tpg->tport = tport; | 2002 | tpg->tport = tport; |
2203 | tpg->tport_tpgt = tpgt; | 2003 | tpg->tport_tpgt = tpgt; |
2204 | 2004 | ||
2205 | ret = core_tpg_register(&vhost_scsi_ops, wwn, | 2005 | ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id); |
2206 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
2207 | if (ret < 0) { | 2006 | if (ret < 0) { |
2208 | kfree(tpg); | 2007 | kfree(tpg); |
2209 | return NULL; | 2008 | return NULL; |
@@ -2327,20 +2126,13 @@ static struct target_core_fabric_ops vhost_scsi_ops = { | |||
2327 | .module = THIS_MODULE, | 2126 | .module = THIS_MODULE, |
2328 | .name = "vhost", | 2127 | .name = "vhost", |
2329 | .get_fabric_name = vhost_scsi_get_fabric_name, | 2128 | .get_fabric_name = vhost_scsi_get_fabric_name, |
2330 | .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, | ||
2331 | .tpg_get_wwn = vhost_scsi_get_fabric_wwn, | 2129 | .tpg_get_wwn = vhost_scsi_get_fabric_wwn, |
2332 | .tpg_get_tag = vhost_scsi_get_tpgt, | 2130 | .tpg_get_tag = vhost_scsi_get_tpgt, |
2333 | .tpg_get_default_depth = vhost_scsi_get_default_depth, | ||
2334 | .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id, | ||
2335 | .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len, | ||
2336 | .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id, | ||
2337 | .tpg_check_demo_mode = vhost_scsi_check_true, | 2131 | .tpg_check_demo_mode = vhost_scsi_check_true, |
2338 | .tpg_check_demo_mode_cache = vhost_scsi_check_true, | 2132 | .tpg_check_demo_mode_cache = vhost_scsi_check_true, |
2339 | .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, | 2133 | .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, |
2340 | .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, | 2134 | .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, |
2341 | .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, | 2135 | .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, |
2342 | .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, | ||
2343 | .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, | ||
2344 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, | 2136 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, |
2345 | .release_cmd = vhost_scsi_release_cmd, | 2137 | .release_cmd = vhost_scsi_release_cmd, |
2346 | .check_stop_free = vhost_scsi_check_stop_free, | 2138 | .check_stop_free = vhost_scsi_check_stop_free, |
@@ -2351,7 +2143,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = { | |||
2351 | .write_pending = vhost_scsi_write_pending, | 2143 | .write_pending = vhost_scsi_write_pending, |
2352 | .write_pending_status = vhost_scsi_write_pending_status, | 2144 | .write_pending_status = vhost_scsi_write_pending_status, |
2353 | .set_default_node_attributes = vhost_scsi_set_default_node_attrs, | 2145 | .set_default_node_attributes = vhost_scsi_set_default_node_attrs, |
2354 | .get_task_tag = vhost_scsi_get_task_tag, | ||
2355 | .get_cmd_state = vhost_scsi_get_cmd_state, | 2146 | .get_cmd_state = vhost_scsi_get_cmd_state, |
2356 | .queue_data_in = vhost_scsi_queue_data_in, | 2147 | .queue_data_in = vhost_scsi_queue_data_in, |
2357 | .queue_status = vhost_scsi_queue_status, | 2148 | .queue_status = vhost_scsi_queue_status, |
@@ -2366,10 +2157,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = { | |||
2366 | .fabric_drop_tpg = vhost_scsi_drop_tpg, | 2157 | .fabric_drop_tpg = vhost_scsi_drop_tpg, |
2367 | .fabric_post_link = vhost_scsi_port_link, | 2158 | .fabric_post_link = vhost_scsi_port_link, |
2368 | .fabric_pre_unlink = vhost_scsi_port_unlink, | 2159 | .fabric_pre_unlink = vhost_scsi_port_unlink, |
2369 | .fabric_make_np = NULL, | ||
2370 | .fabric_drop_np = NULL, | ||
2371 | .fabric_make_nodeacl = vhost_scsi_make_nodeacl, | ||
2372 | .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, | ||
2373 | 2160 | ||
2374 | .tfc_wwn_attrs = vhost_scsi_wwn_attrs, | 2161 | .tfc_wwn_attrs = vhost_scsi_wwn_attrs, |
2375 | .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, | 2162 | .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 39223c3e99ad..9eeefd7cad41 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -53,7 +53,6 @@ | |||
53 | 53 | ||
54 | #include <target/target_core_base.h> | 54 | #include <target/target_core_base.h> |
55 | #include <target/target_core_fabric.h> | 55 | #include <target/target_core_fabric.h> |
56 | #include <target/target_core_configfs.h> | ||
57 | #include <target/target_core_fabric_configfs.h> | 56 | #include <target/target_core_fabric_configfs.h> |
58 | 57 | ||
59 | #include <asm/hypervisor.h> | 58 | #include <asm/hypervisor.h> |
@@ -201,8 +200,6 @@ static LIST_HEAD(scsiback_free_pages); | |||
201 | static DEFINE_MUTEX(scsiback_mutex); | 200 | static DEFINE_MUTEX(scsiback_mutex); |
202 | static LIST_HEAD(scsiback_list); | 201 | static LIST_HEAD(scsiback_list); |
203 | 202 | ||
204 | static const struct target_core_fabric_ops scsiback_ops; | ||
205 | |||
206 | static void scsiback_get(struct vscsibk_info *info) | 203 | static void scsiback_get(struct vscsibk_info *info) |
207 | { | 204 | { |
208 | atomic_inc(&info->nr_unreplied_reqs); | 205 | atomic_inc(&info->nr_unreplied_reqs); |
@@ -397,6 +394,7 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req) | |||
397 | memset(se_cmd, 0, sizeof(*se_cmd)); | 394 | memset(se_cmd, 0, sizeof(*se_cmd)); |
398 | 395 | ||
399 | scsiback_get(pending_req->info); | 396 | scsiback_get(pending_req->info); |
397 | se_cmd->tag = pending_req->rqid; | ||
400 | rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd, | 398 | rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd, |
401 | pending_req->sense_buffer, pending_req->v2p->lun, | 399 | pending_req->sense_buffer, pending_req->v2p->lun, |
402 | pending_req->data_len, 0, | 400 | pending_req->data_len, 0, |
@@ -863,7 +861,8 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info, | |||
863 | struct list_head *head = &(info->v2p_entry_lists); | 861 | struct list_head *head = &(info->v2p_entry_lists); |
864 | unsigned long flags; | 862 | unsigned long flags; |
865 | char *lunp; | 863 | char *lunp; |
866 | unsigned int lun; | 864 | unsigned long long unpacked_lun; |
865 | struct se_lun *se_lun; | ||
867 | struct scsiback_tpg *tpg_entry, *tpg = NULL; | 866 | struct scsiback_tpg *tpg_entry, *tpg = NULL; |
868 | char *error = "doesn't exist"; | 867 | char *error = "doesn't exist"; |
869 | 868 | ||
@@ -874,24 +873,27 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info, | |||
874 | } | 873 | } |
875 | *lunp = 0; | 874 | *lunp = 0; |
876 | lunp++; | 875 | lunp++; |
877 | if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) { | 876 | err = kstrtoull(lunp, 10, &unpacked_lun); |
877 | if (err < 0) { | ||
878 | pr_err("lun number not valid: %s\n", lunp); | 878 | pr_err("lun number not valid: %s\n", lunp); |
879 | return -EINVAL; | 879 | return err; |
880 | } | 880 | } |
881 | 881 | ||
882 | mutex_lock(&scsiback_mutex); | 882 | mutex_lock(&scsiback_mutex); |
883 | list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) { | 883 | list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) { |
884 | if (!strcmp(phy, tpg_entry->tport->tport_name) || | 884 | if (!strcmp(phy, tpg_entry->tport->tport_name) || |
885 | !strcmp(phy, tpg_entry->param_alias)) { | 885 | !strcmp(phy, tpg_entry->param_alias)) { |
886 | spin_lock(&tpg_entry->se_tpg.tpg_lun_lock); | 886 | mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex); |
887 | if (tpg_entry->se_tpg.tpg_lun_list[lun]->lun_status == | 887 | hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) { |
888 | TRANSPORT_LUN_STATUS_ACTIVE) { | 888 | if (se_lun->unpacked_lun == unpacked_lun) { |
889 | if (!tpg_entry->tpg_nexus) | 889 | if (!tpg_entry->tpg_nexus) |
890 | error = "nexus undefined"; | 890 | error = "nexus undefined"; |
891 | else | 891 | else |
892 | tpg = tpg_entry; | 892 | tpg = tpg_entry; |
893 | break; | ||
894 | } | ||
893 | } | 895 | } |
894 | spin_unlock(&tpg_entry->se_tpg.tpg_lun_lock); | 896 | mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex); |
895 | break; | 897 | break; |
896 | } | 898 | } |
897 | } | 899 | } |
@@ -903,7 +905,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info, | |||
903 | mutex_unlock(&scsiback_mutex); | 905 | mutex_unlock(&scsiback_mutex); |
904 | 906 | ||
905 | if (!tpg) { | 907 | if (!tpg) { |
906 | pr_err("%s:%d %s\n", phy, lun, error); | 908 | pr_err("%s:%llu %s\n", phy, unpacked_lun, error); |
907 | return -ENODEV; | 909 | return -ENODEV; |
908 | } | 910 | } |
909 | 911 | ||
@@ -931,7 +933,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info, | |||
931 | kref_init(&new->kref); | 933 | kref_init(&new->kref); |
932 | new->v = *v; | 934 | new->v = *v; |
933 | new->tpg = tpg; | 935 | new->tpg = tpg; |
934 | new->lun = lun; | 936 | new->lun = unpacked_lun; |
935 | list_add_tail(&new->l, head); | 937 | list_add_tail(&new->l, head); |
936 | 938 | ||
937 | out: | 939 | out: |
@@ -1251,28 +1253,6 @@ static char *scsiback_dump_proto_id(struct scsiback_tport *tport) | |||
1251 | return "Unknown"; | 1253 | return "Unknown"; |
1252 | } | 1254 | } |
1253 | 1255 | ||
1254 | static u8 scsiback_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
1255 | { | ||
1256 | struct scsiback_tpg *tpg = container_of(se_tpg, | ||
1257 | struct scsiback_tpg, se_tpg); | ||
1258 | struct scsiback_tport *tport = tpg->tport; | ||
1259 | |||
1260 | switch (tport->tport_proto_id) { | ||
1261 | case SCSI_PROTOCOL_SAS: | ||
1262 | return sas_get_fabric_proto_ident(se_tpg); | ||
1263 | case SCSI_PROTOCOL_FCP: | ||
1264 | return fc_get_fabric_proto_ident(se_tpg); | ||
1265 | case SCSI_PROTOCOL_ISCSI: | ||
1266 | return iscsi_get_fabric_proto_ident(se_tpg); | ||
1267 | default: | ||
1268 | pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", | ||
1269 | tport->tport_proto_id); | ||
1270 | break; | ||
1271 | } | ||
1272 | |||
1273 | return sas_get_fabric_proto_ident(se_tpg); | ||
1274 | } | ||
1275 | |||
1276 | static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg) | 1256 | static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg) |
1277 | { | 1257 | { |
1278 | struct scsiback_tpg *tpg = container_of(se_tpg, | 1258 | struct scsiback_tpg *tpg = container_of(se_tpg, |
@@ -1289,102 +1269,6 @@ static u16 scsiback_get_tag(struct se_portal_group *se_tpg) | |||
1289 | return tpg->tport_tpgt; | 1269 | return tpg->tport_tpgt; |
1290 | } | 1270 | } |
1291 | 1271 | ||
1292 | static u32 scsiback_get_default_depth(struct se_portal_group *se_tpg) | ||
1293 | { | ||
1294 | return 1; | ||
1295 | } | ||
1296 | |||
1297 | static u32 | ||
1298 | scsiback_get_pr_transport_id(struct se_portal_group *se_tpg, | ||
1299 | struct se_node_acl *se_nacl, | ||
1300 | struct t10_pr_registration *pr_reg, | ||
1301 | int *format_code, | ||
1302 | unsigned char *buf) | ||
1303 | { | ||
1304 | struct scsiback_tpg *tpg = container_of(se_tpg, | ||
1305 | struct scsiback_tpg, se_tpg); | ||
1306 | struct scsiback_tport *tport = tpg->tport; | ||
1307 | |||
1308 | switch (tport->tport_proto_id) { | ||
1309 | case SCSI_PROTOCOL_SAS: | ||
1310 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
1311 | format_code, buf); | ||
1312 | case SCSI_PROTOCOL_FCP: | ||
1313 | return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
1314 | format_code, buf); | ||
1315 | case SCSI_PROTOCOL_ISCSI: | ||
1316 | return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
1317 | format_code, buf); | ||
1318 | default: | ||
1319 | pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", | ||
1320 | tport->tport_proto_id); | ||
1321 | break; | ||
1322 | } | ||
1323 | |||
1324 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
1325 | format_code, buf); | ||
1326 | } | ||
1327 | |||
1328 | static u32 | ||
1329 | scsiback_get_pr_transport_id_len(struct se_portal_group *se_tpg, | ||
1330 | struct se_node_acl *se_nacl, | ||
1331 | struct t10_pr_registration *pr_reg, | ||
1332 | int *format_code) | ||
1333 | { | ||
1334 | struct scsiback_tpg *tpg = container_of(se_tpg, | ||
1335 | struct scsiback_tpg, se_tpg); | ||
1336 | struct scsiback_tport *tport = tpg->tport; | ||
1337 | |||
1338 | switch (tport->tport_proto_id) { | ||
1339 | case SCSI_PROTOCOL_SAS: | ||
1340 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
1341 | format_code); | ||
1342 | case SCSI_PROTOCOL_FCP: | ||
1343 | return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
1344 | format_code); | ||
1345 | case SCSI_PROTOCOL_ISCSI: | ||
1346 | return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
1347 | format_code); | ||
1348 | default: | ||
1349 | pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", | ||
1350 | tport->tport_proto_id); | ||
1351 | break; | ||
1352 | } | ||
1353 | |||
1354 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
1355 | format_code); | ||
1356 | } | ||
1357 | |||
1358 | static char * | ||
1359 | scsiback_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | ||
1360 | const char *buf, | ||
1361 | u32 *out_tid_len, | ||
1362 | char **port_nexus_ptr) | ||
1363 | { | ||
1364 | struct scsiback_tpg *tpg = container_of(se_tpg, | ||
1365 | struct scsiback_tpg, se_tpg); | ||
1366 | struct scsiback_tport *tport = tpg->tport; | ||
1367 | |||
1368 | switch (tport->tport_proto_id) { | ||
1369 | case SCSI_PROTOCOL_SAS: | ||
1370 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
1371 | port_nexus_ptr); | ||
1372 | case SCSI_PROTOCOL_FCP: | ||
1373 | return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
1374 | port_nexus_ptr); | ||
1375 | case SCSI_PROTOCOL_ISCSI: | ||
1376 | return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
1377 | port_nexus_ptr); | ||
1378 | default: | ||
1379 | pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n", | ||
1380 | tport->tport_proto_id); | ||
1381 | break; | ||
1382 | } | ||
1383 | |||
1384 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
1385 | port_nexus_ptr); | ||
1386 | } | ||
1387 | |||
1388 | static struct se_wwn * | 1272 | static struct se_wwn * |
1389 | scsiback_make_tport(struct target_fabric_configfs *tf, | 1273 | scsiback_make_tport(struct target_fabric_configfs *tf, |
1390 | struct config_group *group, | 1274 | struct config_group *group, |
@@ -1451,19 +1335,6 @@ static void scsiback_drop_tport(struct se_wwn *wwn) | |||
1451 | kfree(tport); | 1335 | kfree(tport); |
1452 | } | 1336 | } |
1453 | 1337 | ||
1454 | static struct se_node_acl * | ||
1455 | scsiback_alloc_fabric_acl(struct se_portal_group *se_tpg) | ||
1456 | { | ||
1457 | return kzalloc(sizeof(struct se_node_acl), GFP_KERNEL); | ||
1458 | } | ||
1459 | |||
1460 | static void | ||
1461 | scsiback_release_fabric_acl(struct se_portal_group *se_tpg, | ||
1462 | struct se_node_acl *se_nacl) | ||
1463 | { | ||
1464 | kfree(se_nacl); | ||
1465 | } | ||
1466 | |||
1467 | static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg) | 1338 | static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg) |
1468 | { | 1339 | { |
1469 | return 1; | 1340 | return 1; |
@@ -1522,14 +1393,6 @@ static void scsiback_set_default_node_attrs(struct se_node_acl *nacl) | |||
1522 | { | 1393 | { |
1523 | } | 1394 | } |
1524 | 1395 | ||
1525 | static u32 scsiback_get_task_tag(struct se_cmd *se_cmd) | ||
1526 | { | ||
1527 | struct vscsibk_pend *pending_req = container_of(se_cmd, | ||
1528 | struct vscsibk_pend, se_cmd); | ||
1529 | |||
1530 | return pending_req->rqid; | ||
1531 | } | ||
1532 | |||
1533 | static int scsiback_get_cmd_state(struct se_cmd *se_cmd) | 1396 | static int scsiback_get_cmd_state(struct se_cmd *se_cmd) |
1534 | { | 1397 | { |
1535 | return 0; | 1398 | return 0; |
@@ -1898,8 +1761,7 @@ scsiback_make_tpg(struct se_wwn *wwn, | |||
1898 | tpg->tport = tport; | 1761 | tpg->tport = tport; |
1899 | tpg->tport_tpgt = tpgt; | 1762 | tpg->tport_tpgt = tpgt; |
1900 | 1763 | ||
1901 | ret = core_tpg_register(&scsiback_ops, wwn, | 1764 | ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id); |
1902 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | ||
1903 | if (ret < 0) { | 1765 | if (ret < 0) { |
1904 | kfree(tpg); | 1766 | kfree(tpg); |
1905 | return NULL; | 1767 | return NULL; |
@@ -1944,23 +1806,15 @@ static const struct target_core_fabric_ops scsiback_ops = { | |||
1944 | .module = THIS_MODULE, | 1806 | .module = THIS_MODULE, |
1945 | .name = "xen-pvscsi", | 1807 | .name = "xen-pvscsi", |
1946 | .get_fabric_name = scsiback_get_fabric_name, | 1808 | .get_fabric_name = scsiback_get_fabric_name, |
1947 | .get_fabric_proto_ident = scsiback_get_fabric_proto_ident, | ||
1948 | .tpg_get_wwn = scsiback_get_fabric_wwn, | 1809 | .tpg_get_wwn = scsiback_get_fabric_wwn, |
1949 | .tpg_get_tag = scsiback_get_tag, | 1810 | .tpg_get_tag = scsiback_get_tag, |
1950 | .tpg_get_default_depth = scsiback_get_default_depth, | ||
1951 | .tpg_get_pr_transport_id = scsiback_get_pr_transport_id, | ||
1952 | .tpg_get_pr_transport_id_len = scsiback_get_pr_transport_id_len, | ||
1953 | .tpg_parse_pr_out_transport_id = scsiback_parse_pr_out_transport_id, | ||
1954 | .tpg_check_demo_mode = scsiback_check_true, | 1811 | .tpg_check_demo_mode = scsiback_check_true, |
1955 | .tpg_check_demo_mode_cache = scsiback_check_true, | 1812 | .tpg_check_demo_mode_cache = scsiback_check_true, |
1956 | .tpg_check_demo_mode_write_protect = scsiback_check_false, | 1813 | .tpg_check_demo_mode_write_protect = scsiback_check_false, |
1957 | .tpg_check_prod_mode_write_protect = scsiback_check_false, | 1814 | .tpg_check_prod_mode_write_protect = scsiback_check_false, |
1958 | .tpg_alloc_fabric_acl = scsiback_alloc_fabric_acl, | ||
1959 | .tpg_release_fabric_acl = scsiback_release_fabric_acl, | ||
1960 | .tpg_get_inst_index = scsiback_tpg_get_inst_index, | 1815 | .tpg_get_inst_index = scsiback_tpg_get_inst_index, |
1961 | .check_stop_free = scsiback_check_stop_free, | 1816 | .check_stop_free = scsiback_check_stop_free, |
1962 | .release_cmd = scsiback_release_cmd, | 1817 | .release_cmd = scsiback_release_cmd, |
1963 | .put_session = NULL, | ||
1964 | .shutdown_session = scsiback_shutdown_session, | 1818 | .shutdown_session = scsiback_shutdown_session, |
1965 | .close_session = scsiback_close_session, | 1819 | .close_session = scsiback_close_session, |
1966 | .sess_get_index = scsiback_sess_get_index, | 1820 | .sess_get_index = scsiback_sess_get_index, |
@@ -1968,7 +1822,6 @@ static const struct target_core_fabric_ops scsiback_ops = { | |||
1968 | .write_pending = scsiback_write_pending, | 1822 | .write_pending = scsiback_write_pending, |
1969 | .write_pending_status = scsiback_write_pending_status, | 1823 | .write_pending_status = scsiback_write_pending_status, |
1970 | .set_default_node_attributes = scsiback_set_default_node_attrs, | 1824 | .set_default_node_attributes = scsiback_set_default_node_attrs, |
1971 | .get_task_tag = scsiback_get_task_tag, | ||
1972 | .get_cmd_state = scsiback_get_cmd_state, | 1825 | .get_cmd_state = scsiback_get_cmd_state, |
1973 | .queue_data_in = scsiback_queue_data_in, | 1826 | .queue_data_in = scsiback_queue_data_in, |
1974 | .queue_status = scsiback_queue_status, | 1827 | .queue_status = scsiback_queue_status, |
@@ -1983,12 +1836,6 @@ static const struct target_core_fabric_ops scsiback_ops = { | |||
1983 | .fabric_drop_tpg = scsiback_drop_tpg, | 1836 | .fabric_drop_tpg = scsiback_drop_tpg, |
1984 | .fabric_post_link = scsiback_port_link, | 1837 | .fabric_post_link = scsiback_port_link, |
1985 | .fabric_pre_unlink = scsiback_port_unlink, | 1838 | .fabric_pre_unlink = scsiback_port_unlink, |
1986 | .fabric_make_np = NULL, | ||
1987 | .fabric_drop_np = NULL, | ||
1988 | #if 0 | ||
1989 | .fabric_make_nodeacl = scsiback_make_nodeacl, | ||
1990 | .fabric_drop_nodeacl = scsiback_drop_nodeacl, | ||
1991 | #endif | ||
1992 | 1839 | ||
1993 | .tfc_wwn_attrs = scsiback_wwn_attrs, | 1840 | .tfc_wwn_attrs = scsiback_wwn_attrs, |
1994 | .tfc_tpg_base_attrs = scsiback_tpg_attrs, | 1841 | .tfc_tpg_base_attrs = scsiback_tpg_attrs, |
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index cf53d0773ce3..d81961e9e37d 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h | |||
@@ -9,5 +9,6 @@ | |||
9 | extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, | 9 | extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, |
10 | size_t len); | 10 | size_t len); |
11 | extern __u16 crc_t10dif(unsigned char const *, size_t); | 11 | extern __u16 crc_t10dif(unsigned char const *, size_t); |
12 | extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t); | ||
12 | 13 | ||
13 | #endif | 14 | #endif |
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 006983b296dd..34117b8b72e4 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h | |||
@@ -247,10 +247,6 @@ struct iscsi_conn_ops { | |||
247 | u8 DataDigest; /* [0,1] == [None,CRC32C] */ | 247 | u8 DataDigest; /* [0,1] == [None,CRC32C] */ |
248 | u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */ | 248 | u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */ |
249 | u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */ | 249 | u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */ |
250 | u8 OFMarker; /* [0,1] == [No,Yes] */ | ||
251 | u8 IFMarker; /* [0,1] == [No,Yes] */ | ||
252 | u32 OFMarkInt; /* [1..65535] */ | ||
253 | u32 IFMarkInt; /* [1..65535] */ | ||
254 | /* | 250 | /* |
255 | * iSER specific connection parameters | 251 | * iSER specific connection parameters |
256 | */ | 252 | */ |
@@ -531,12 +527,6 @@ struct iscsi_conn { | |||
531 | u32 exp_statsn; | 527 | u32 exp_statsn; |
532 | /* Per connection status sequence number */ | 528 | /* Per connection status sequence number */ |
533 | u32 stat_sn; | 529 | u32 stat_sn; |
534 | /* IFMarkInt's Current Value */ | ||
535 | u32 if_marker; | ||
536 | /* OFMarkInt's Current Value */ | ||
537 | u32 of_marker; | ||
538 | /* Used for calculating OFMarker offset to next PDU */ | ||
539 | u32 of_marker_offset; | ||
540 | #define IPV6_ADDRESS_SPACE 48 | 530 | #define IPV6_ADDRESS_SPACE 48 |
541 | unsigned char login_ip[IPV6_ADDRESS_SPACE]; | 531 | unsigned char login_ip[IPV6_ADDRESS_SPACE]; |
542 | unsigned char local_ip[IPV6_ADDRESS_SPACE]; | 532 | unsigned char local_ip[IPV6_ADDRESS_SPACE]; |
@@ -754,10 +744,10 @@ struct iscsi_node_stat_grps { | |||
754 | }; | 744 | }; |
755 | 745 | ||
756 | struct iscsi_node_acl { | 746 | struct iscsi_node_acl { |
747 | struct se_node_acl se_node_acl; | ||
757 | struct iscsi_node_attrib node_attrib; | 748 | struct iscsi_node_attrib node_attrib; |
758 | struct iscsi_node_auth node_auth; | 749 | struct iscsi_node_auth node_auth; |
759 | struct iscsi_node_stat_grps node_stat_grps; | 750 | struct iscsi_node_stat_grps node_stat_grps; |
760 | struct se_node_acl se_node_acl; | ||
761 | }; | 751 | }; |
762 | 752 | ||
763 | struct iscsi_tpg_attrib { | 753 | struct iscsi_tpg_attrib { |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 5f1225706993..1e5c8f949bae 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -3,18 +3,7 @@ | |||
3 | 3 | ||
4 | #define TRANSPORT_FLAG_PASSTHROUGH 1 | 4 | #define TRANSPORT_FLAG_PASSTHROUGH 1 |
5 | 5 | ||
6 | struct target_backend_cits { | 6 | struct target_backend_ops { |
7 | struct config_item_type tb_dev_cit; | ||
8 | struct config_item_type tb_dev_attrib_cit; | ||
9 | struct config_item_type tb_dev_pr_cit; | ||
10 | struct config_item_type tb_dev_wwn_cit; | ||
11 | struct config_item_type tb_dev_alua_tg_pt_gps_cit; | ||
12 | struct config_item_type tb_dev_stat_cit; | ||
13 | }; | ||
14 | |||
15 | struct se_subsystem_api { | ||
16 | struct list_head sub_api_list; | ||
17 | |||
18 | char name[16]; | 7 | char name[16]; |
19 | char inquiry_prod[16]; | 8 | char inquiry_prod[16]; |
20 | char inquiry_rev[4]; | 9 | char inquiry_rev[4]; |
@@ -52,7 +41,7 @@ struct se_subsystem_api { | |||
52 | int (*format_prot)(struct se_device *); | 41 | int (*format_prot)(struct se_device *); |
53 | void (*free_prot)(struct se_device *); | 42 | void (*free_prot)(struct se_device *); |
54 | 43 | ||
55 | struct target_backend_cits tb_cits; | 44 | struct configfs_attribute **tb_dev_attrib_attrs; |
56 | }; | 45 | }; |
57 | 46 | ||
58 | struct sbc_ops { | 47 | struct sbc_ops { |
@@ -60,12 +49,12 @@ struct sbc_ops { | |||
60 | u32, enum dma_data_direction); | 49 | u32, enum dma_data_direction); |
61 | sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); | 50 | sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); |
62 | sense_reason_t (*execute_write_same)(struct se_cmd *cmd); | 51 | sense_reason_t (*execute_write_same)(struct se_cmd *cmd); |
63 | sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd); | 52 | sense_reason_t (*execute_unmap)(struct se_cmd *cmd, |
64 | sense_reason_t (*execute_unmap)(struct se_cmd *cmd); | 53 | sector_t lba, sector_t nolb); |
65 | }; | 54 | }; |
66 | 55 | ||
67 | int transport_subsystem_register(struct se_subsystem_api *); | 56 | int transport_backend_register(const struct target_backend_ops *); |
68 | void transport_subsystem_release(struct se_subsystem_api *); | 57 | void target_backend_unregister(const struct target_backend_ops *); |
69 | 58 | ||
70 | void target_complete_cmd(struct se_cmd *, u8); | 59 | void target_complete_cmd(struct se_cmd *, u8); |
71 | void target_complete_cmd_with_length(struct se_cmd *, u8, int); | 60 | void target_complete_cmd_with_length(struct se_cmd *, u8, int); |
@@ -79,22 +68,19 @@ sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops); | |||
79 | u32 sbc_get_device_rev(struct se_device *dev); | 68 | u32 sbc_get_device_rev(struct se_device *dev); |
80 | u32 sbc_get_device_type(struct se_device *dev); | 69 | u32 sbc_get_device_type(struct se_device *dev); |
81 | sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); | 70 | sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); |
82 | sense_reason_t sbc_execute_unmap(struct se_cmd *cmd, | ||
83 | sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv, | ||
84 | sector_t lba, sector_t nolb), | ||
85 | void *priv); | ||
86 | void sbc_dif_generate(struct se_cmd *); | 71 | void sbc_dif_generate(struct se_cmd *); |
87 | sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int, | 72 | sense_reason_t sbc_dif_verify(struct se_cmd *, sector_t, unsigned int, |
88 | unsigned int, struct scatterlist *, int); | 73 | unsigned int, struct scatterlist *, int); |
89 | sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int, | 74 | void sbc_dif_copy_prot(struct se_cmd *, unsigned int, bool, |
90 | unsigned int, struct scatterlist *, int); | 75 | struct scatterlist *, int); |
91 | sense_reason_t sbc_dif_read_strip(struct se_cmd *); | ||
92 | |||
93 | void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); | 76 | void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); |
94 | int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); | 77 | int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); |
95 | int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); | 78 | int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); |
96 | int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); | 79 | int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); |
97 | 80 | ||
81 | extern struct configfs_attribute *sbc_attrib_attrs[]; | ||
82 | extern struct configfs_attribute *passthrough_attrib_attrs[]; | ||
83 | |||
98 | /* core helpers also used by command snooping in pscsi */ | 84 | /* core helpers also used by command snooping in pscsi */ |
99 | void *transport_kmap_data_sg(struct se_cmd *); | 85 | void *transport_kmap_data_sg(struct se_cmd *); |
100 | void transport_kunmap_data_sg(struct se_cmd *); | 86 | void transport_kunmap_data_sg(struct se_cmd *); |
@@ -103,39 +89,7 @@ int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool); | |||
103 | sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, | 89 | sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, |
104 | struct scatterlist *, u32, struct scatterlist *, u32); | 90 | struct scatterlist *, u32, struct scatterlist *, u32); |
105 | 91 | ||
106 | void array_free(void *array, int n); | 92 | bool target_lun_is_rdonly(struct se_cmd *); |
107 | |||
108 | /* From target_core_configfs.c to setup default backend config_item_types */ | ||
109 | void target_core_setup_sub_cits(struct se_subsystem_api *); | ||
110 | |||
111 | /* attribute helpers from target_core_device.c for backend drivers */ | ||
112 | bool se_dev_check_wce(struct se_device *); | ||
113 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | ||
114 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | ||
115 | int se_dev_set_unmap_granularity(struct se_device *, u32); | ||
116 | int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); | ||
117 | int se_dev_set_max_write_same_len(struct se_device *, u32); | ||
118 | int se_dev_set_emulate_model_alias(struct se_device *, int); | ||
119 | int se_dev_set_emulate_dpo(struct se_device *, int); | ||
120 | int se_dev_set_emulate_fua_write(struct se_device *, int); | ||
121 | int se_dev_set_emulate_fua_read(struct se_device *, int); | ||
122 | int se_dev_set_emulate_write_cache(struct se_device *, int); | ||
123 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); | ||
124 | int se_dev_set_emulate_tas(struct se_device *, int); | ||
125 | int se_dev_set_emulate_tpu(struct se_device *, int); | ||
126 | int se_dev_set_emulate_tpws(struct se_device *, int); | ||
127 | int se_dev_set_emulate_caw(struct se_device *, int); | ||
128 | int se_dev_set_emulate_3pc(struct se_device *, int); | ||
129 | int se_dev_set_pi_prot_type(struct se_device *, int); | ||
130 | int se_dev_set_pi_prot_format(struct se_device *, int); | ||
131 | int se_dev_set_enforce_pr_isids(struct se_device *, int); | ||
132 | int se_dev_set_force_pr_aptpl(struct se_device *, int); | ||
133 | int se_dev_set_is_nonrot(struct se_device *, int); | ||
134 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); | ||
135 | int se_dev_set_queue_depth(struct se_device *, u32); | ||
136 | int se_dev_set_max_sectors(struct se_device *, u32); | ||
137 | int se_dev_set_optimal_sectors(struct se_device *, u32); | ||
138 | int se_dev_set_block_size(struct se_device *, u32); | ||
139 | sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, | 93 | sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, |
140 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); | 94 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); |
141 | 95 | ||
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h deleted file mode 100644 index 186f7a923570..000000000000 --- a/include/target/target_core_backend_configfs.h +++ /dev/null | |||
@@ -1,118 +0,0 @@ | |||
1 | #ifndef TARGET_CORE_BACKEND_CONFIGFS_H | ||
2 | #define TARGET_CORE_BACKEND_CONFIGFS_H | ||
3 | |||
4 | #include <target/configfs_macros.h> | ||
5 | |||
6 | #define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \ | ||
7 | static ssize_t _backend##_dev_show_attr_##_name( \ | ||
8 | struct se_dev_attrib *da, \ | ||
9 | char *page) \ | ||
10 | { \ | ||
11 | return snprintf(page, PAGE_SIZE, "%u\n", \ | ||
12 | (u32)da->da_dev->dev_attrib._name); \ | ||
13 | } | ||
14 | |||
15 | #define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \ | ||
16 | static ssize_t _backend##_dev_store_attr_##_name( \ | ||
17 | struct se_dev_attrib *da, \ | ||
18 | const char *page, \ | ||
19 | size_t count) \ | ||
20 | { \ | ||
21 | unsigned long val; \ | ||
22 | int ret; \ | ||
23 | \ | ||
24 | ret = kstrtoul(page, 0, &val); \ | ||
25 | if (ret < 0) { \ | ||
26 | pr_err("kstrtoul() failed with ret: %d\n", ret); \ | ||
27 | return -EINVAL; \ | ||
28 | } \ | ||
29 | ret = se_dev_set_##_name(da->da_dev, (u32)val); \ | ||
30 | \ | ||
31 | return (!ret) ? count : -EINVAL; \ | ||
32 | } | ||
33 | |||
34 | #define DEF_TB_DEV_ATTRIB(_backend, _name) \ | ||
35 | DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \ | ||
36 | DEF_TB_DEV_ATTRIB_STORE(_backend, _name); | ||
37 | |||
38 | #define DEF_TB_DEV_ATTRIB_RO(_backend, name) \ | ||
39 | DEF_TB_DEV_ATTRIB_SHOW(_backend, name); | ||
40 | |||
41 | CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib); | ||
42 | #define TB_DEV_ATTR(_backend, _name, _mode) \ | ||
43 | static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ | ||
44 | __CONFIGFS_EATTR(_name, _mode, \ | ||
45 | _backend##_dev_show_attr_##_name, \ | ||
46 | _backend##_dev_store_attr_##_name); | ||
47 | |||
48 | #define TB_DEV_ATTR_RO(_backend, _name) \ | ||
49 | static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ | ||
50 | __CONFIGFS_EATTR_RO(_name, \ | ||
51 | _backend##_dev_show_attr_##_name); | ||
52 | |||
53 | /* | ||
54 | * Default list of target backend device attributes as defined by | ||
55 | * struct se_dev_attrib | ||
56 | */ | ||
57 | |||
58 | #define DEF_TB_DEFAULT_ATTRIBS(_backend) \ | ||
59 | DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \ | ||
60 | TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \ | ||
61 | DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \ | ||
62 | TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \ | ||
63 | DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \ | ||
64 | TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \ | ||
65 | DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \ | ||
66 | TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \ | ||
67 | DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \ | ||
68 | TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \ | ||
69 | DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \ | ||
70 | TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \ | ||
71 | DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \ | ||
72 | TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \ | ||
73 | DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \ | ||
74 | TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \ | ||
75 | DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \ | ||
76 | TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \ | ||
77 | DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \ | ||
78 | TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \ | ||
79 | DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \ | ||
80 | TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \ | ||
81 | DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \ | ||
82 | TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \ | ||
83 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \ | ||
84 | TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \ | ||
85 | DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \ | ||
86 | TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \ | ||
87 | DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \ | ||
88 | TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \ | ||
89 | DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \ | ||
90 | TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \ | ||
91 | DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \ | ||
92 | TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \ | ||
93 | DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \ | ||
94 | TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \ | ||
95 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \ | ||
96 | TB_DEV_ATTR_RO(_backend, hw_block_size); \ | ||
97 | DEF_TB_DEV_ATTRIB(_backend, block_size); \ | ||
98 | TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ | ||
99 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ | ||
100 | TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ | ||
101 | DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ | ||
102 | TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ | ||
103 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ | ||
104 | TB_DEV_ATTR_RO(_backend, hw_queue_depth); \ | ||
105 | DEF_TB_DEV_ATTRIB(_backend, queue_depth); \ | ||
106 | TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \ | ||
107 | DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \ | ||
108 | TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \ | ||
109 | DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \ | ||
110 | TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \ | ||
111 | DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \ | ||
112 | TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \ | ||
113 | DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \ | ||
114 | TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \ | ||
115 | DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \ | ||
116 | TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR); | ||
117 | |||
118 | #endif /* TARGET_CORE_BACKEND_CONFIGFS_H */ | ||
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index aec6f6a4477c..17ae2d6a4891 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -9,12 +9,8 @@ | |||
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | 11 | ||
12 | #define TARGET_CORE_MOD_VERSION "v4.1.0" | 12 | #define TARGET_CORE_VERSION "v5.0" |
13 | #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION | ||
14 | 13 | ||
15 | /* Maximum Number of LUNs per Target Portal Group */ | ||
16 | /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ | ||
17 | #define TRANSPORT_MAX_LUNS_PER_TPG 256 | ||
18 | /* | 14 | /* |
19 | * Maximum size of a CDB that can be stored in se_cmd without allocating | 15 | * Maximum size of a CDB that can be stored in se_cmd without allocating |
20 | * memory dynamically for the CDB. | 16 | * memory dynamically for the CDB. |
@@ -70,12 +66,6 @@ | |||
70 | #define DA_MAX_WRITE_SAME_LEN 0 | 66 | #define DA_MAX_WRITE_SAME_LEN 0 |
71 | /* Use a model alias based on the configfs backend device name */ | 67 | /* Use a model alias based on the configfs backend device name */ |
72 | #define DA_EMULATE_MODEL_ALIAS 0 | 68 | #define DA_EMULATE_MODEL_ALIAS 0 |
73 | /* Emulation for Direct Page Out */ | ||
74 | #define DA_EMULATE_DPO 0 | ||
75 | /* Emulation for Forced Unit Access WRITEs */ | ||
76 | #define DA_EMULATE_FUA_WRITE 1 | ||
77 | /* Emulation for Forced Unit Access READs */ | ||
78 | #define DA_EMULATE_FUA_READ 0 | ||
79 | /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ | 69 | /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ |
80 | #define DA_EMULATE_WRITE_CACHE 0 | 70 | #define DA_EMULATE_WRITE_CACHE 0 |
81 | /* Emulation for UNIT ATTENTION Interlock Control */ | 71 | /* Emulation for UNIT ATTENTION Interlock Control */ |
@@ -116,18 +106,6 @@ enum hba_flags_table { | |||
116 | HBA_FLAGS_PSCSI_MODE = 0x02, | 106 | HBA_FLAGS_PSCSI_MODE = 0x02, |
117 | }; | 107 | }; |
118 | 108 | ||
119 | /* struct se_lun->lun_status */ | ||
120 | enum transport_lun_status_table { | ||
121 | TRANSPORT_LUN_STATUS_FREE = 0, | ||
122 | TRANSPORT_LUN_STATUS_ACTIVE = 1, | ||
123 | }; | ||
124 | |||
125 | /* struct se_portal_group->se_tpg_type */ | ||
126 | enum transport_tpg_type_table { | ||
127 | TRANSPORT_TPG_TYPE_NORMAL = 0, | ||
128 | TRANSPORT_TPG_TYPE_DISCOVERY = 1, | ||
129 | }; | ||
130 | |||
131 | /* Special transport agnostic struct se_cmd->t_states */ | 109 | /* Special transport agnostic struct se_cmd->t_states */ |
132 | enum transport_state_table { | 110 | enum transport_state_table { |
133 | TRANSPORT_NO_STATE = 0, | 111 | TRANSPORT_NO_STATE = 0, |
@@ -158,14 +136,13 @@ enum se_cmd_flags_table { | |||
158 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, | 136 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, |
159 | SCF_COMPARE_AND_WRITE = 0x00080000, | 137 | SCF_COMPARE_AND_WRITE = 0x00080000, |
160 | SCF_COMPARE_AND_WRITE_POST = 0x00100000, | 138 | SCF_COMPARE_AND_WRITE_POST = 0x00100000, |
139 | SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, | ||
161 | }; | 140 | }; |
162 | 141 | ||
163 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | 142 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
164 | enum transport_lunflags_table { | 143 | enum transport_lunflags_table { |
165 | TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, | 144 | TRANSPORT_LUNFLAGS_READ_ONLY = 0x01, |
166 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, | 145 | TRANSPORT_LUNFLAGS_READ_WRITE = 0x02, |
167 | TRANSPORT_LUNFLAGS_READ_ONLY = 0x02, | ||
168 | TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, | ||
169 | }; | 146 | }; |
170 | 147 | ||
171 | /* | 148 | /* |
@@ -314,22 +291,13 @@ struct t10_alua_tg_pt_gp { | |||
314 | struct se_device *tg_pt_gp_dev; | 291 | struct se_device *tg_pt_gp_dev; |
315 | struct config_group tg_pt_gp_group; | 292 | struct config_group tg_pt_gp_group; |
316 | struct list_head tg_pt_gp_list; | 293 | struct list_head tg_pt_gp_list; |
317 | struct list_head tg_pt_gp_mem_list; | 294 | struct list_head tg_pt_gp_lun_list; |
318 | struct se_port *tg_pt_gp_alua_port; | 295 | struct se_lun *tg_pt_gp_alua_lun; |
319 | struct se_node_acl *tg_pt_gp_alua_nacl; | 296 | struct se_node_acl *tg_pt_gp_alua_nacl; |
320 | struct delayed_work tg_pt_gp_transition_work; | 297 | struct delayed_work tg_pt_gp_transition_work; |
321 | struct completion *tg_pt_gp_transition_complete; | 298 | struct completion *tg_pt_gp_transition_complete; |
322 | }; | 299 | }; |
323 | 300 | ||
324 | struct t10_alua_tg_pt_gp_member { | ||
325 | bool tg_pt_gp_assoc; | ||
326 | atomic_t tg_pt_gp_mem_ref_cnt; | ||
327 | spinlock_t tg_pt_gp_mem_lock; | ||
328 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
329 | struct se_port *tg_pt; | ||
330 | struct list_head tg_pt_gp_mem_list; | ||
331 | }; | ||
332 | |||
333 | struct t10_vpd { | 301 | struct t10_vpd { |
334 | unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; | 302 | unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; |
335 | int protocol_identifier_set; | 303 | int protocol_identifier_set; |
@@ -374,15 +342,16 @@ struct t10_pr_registration { | |||
374 | int pr_res_scope; | 342 | int pr_res_scope; |
375 | /* Used for fabric initiator WWPNs using a ISID */ | 343 | /* Used for fabric initiator WWPNs using a ISID */ |
376 | bool isid_present_at_reg; | 344 | bool isid_present_at_reg; |
377 | u32 pr_res_mapped_lun; | 345 | u64 pr_res_mapped_lun; |
378 | u32 pr_aptpl_target_lun; | 346 | u64 pr_aptpl_target_lun; |
347 | u16 tg_pt_sep_rtpi; | ||
379 | u32 pr_res_generation; | 348 | u32 pr_res_generation; |
380 | u64 pr_reg_bin_isid; | 349 | u64 pr_reg_bin_isid; |
381 | u64 pr_res_key; | 350 | u64 pr_res_key; |
382 | atomic_t pr_res_holders; | 351 | atomic_t pr_res_holders; |
383 | struct se_node_acl *pr_reg_nacl; | 352 | struct se_node_acl *pr_reg_nacl; |
353 | /* Used by ALL_TG_PT=1 registration with deve->pr_ref taken */ | ||
384 | struct se_dev_entry *pr_reg_deve; | 354 | struct se_dev_entry *pr_reg_deve; |
385 | struct se_lun *pr_reg_tg_pt_lun; | ||
386 | struct list_head pr_reg_list; | 355 | struct list_head pr_reg_list; |
387 | struct list_head pr_reg_abort_list; | 356 | struct list_head pr_reg_abort_list; |
388 | struct list_head pr_reg_aptpl_list; | 357 | struct list_head pr_reg_aptpl_list; |
@@ -422,7 +391,7 @@ struct se_tmr_req { | |||
422 | u8 response; | 391 | u8 response; |
423 | int call_transport; | 392 | int call_transport; |
424 | /* Reference to ITT that Task Mgmt should be performed */ | 393 | /* Reference to ITT that Task Mgmt should be performed */ |
425 | u32 ref_task_tag; | 394 | u64 ref_task_tag; |
426 | void *fabric_tmr_ptr; | 395 | void *fabric_tmr_ptr; |
427 | struct se_cmd *task_cmd; | 396 | struct se_cmd *task_cmd; |
428 | struct se_device *tmr_dev; | 397 | struct se_device *tmr_dev; |
@@ -475,6 +444,7 @@ struct se_cmd { | |||
475 | u8 scsi_asc; | 444 | u8 scsi_asc; |
476 | u8 scsi_ascq; | 445 | u8 scsi_ascq; |
477 | u16 scsi_sense_length; | 446 | u16 scsi_sense_length; |
447 | u64 tag; /* SAM command identifier aka task tag */ | ||
478 | /* Delay for ALUA Active/NonOptimized state access in milliseconds */ | 448 | /* Delay for ALUA Active/NonOptimized state access in milliseconds */ |
479 | int alua_nonop_delay; | 449 | int alua_nonop_delay; |
480 | /* See include/linux/dma-mapping.h */ | 450 | /* See include/linux/dma-mapping.h */ |
@@ -493,7 +463,7 @@ struct se_cmd { | |||
493 | /* Total size in bytes associated with command */ | 463 | /* Total size in bytes associated with command */ |
494 | u32 data_length; | 464 | u32 data_length; |
495 | u32 residual_count; | 465 | u32 residual_count; |
496 | u32 orig_fe_lun; | 466 | u64 orig_fe_lun; |
497 | /* Persistent Reservation key */ | 467 | /* Persistent Reservation key */ |
498 | u64 pr_res_key; | 468 | u64 pr_res_key; |
499 | /* Used for sense data */ | 469 | /* Used for sense data */ |
@@ -501,7 +471,6 @@ struct se_cmd { | |||
501 | struct list_head se_delayed_node; | 471 | struct list_head se_delayed_node; |
502 | struct list_head se_qf_node; | 472 | struct list_head se_qf_node; |
503 | struct se_device *se_dev; | 473 | struct se_device *se_dev; |
504 | struct se_dev_entry *se_deve; | ||
505 | struct se_lun *se_lun; | 474 | struct se_lun *se_lun; |
506 | /* Only used for internal passthrough and legacy TCM fabric modules */ | 475 | /* Only used for internal passthrough and legacy TCM fabric modules */ |
507 | struct se_session *se_sess; | 476 | struct se_session *se_sess; |
@@ -511,9 +480,8 @@ struct se_cmd { | |||
511 | struct kref cmd_kref; | 480 | struct kref cmd_kref; |
512 | const struct target_core_fabric_ops *se_tfo; | 481 | const struct target_core_fabric_ops *se_tfo; |
513 | sense_reason_t (*execute_cmd)(struct se_cmd *); | 482 | sense_reason_t (*execute_cmd)(struct se_cmd *); |
514 | sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, | ||
515 | u32, enum dma_data_direction); | ||
516 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); | 483 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); |
484 | void *protocol_data; | ||
517 | 485 | ||
518 | unsigned char *t_task_cdb; | 486 | unsigned char *t_task_cdb; |
519 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; | 487 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; |
@@ -569,7 +537,6 @@ struct se_cmd { | |||
569 | struct se_ua { | 537 | struct se_ua { |
570 | u8 ua_asc; | 538 | u8 ua_asc; |
571 | u8 ua_ascq; | 539 | u8 ua_ascq; |
572 | struct se_node_acl *ua_nacl; | ||
573 | struct list_head ua_nacl_list; | 540 | struct list_head ua_nacl_list; |
574 | }; | 541 | }; |
575 | 542 | ||
@@ -585,10 +552,10 @@ struct se_node_acl { | |||
585 | char acl_tag[MAX_ACL_TAG_SIZE]; | 552 | char acl_tag[MAX_ACL_TAG_SIZE]; |
586 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 553 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
587 | atomic_t acl_pr_ref_count; | 554 | atomic_t acl_pr_ref_count; |
588 | struct se_dev_entry **device_list; | 555 | struct hlist_head lun_entry_hlist; |
589 | struct se_session *nacl_sess; | 556 | struct se_session *nacl_sess; |
590 | struct se_portal_group *se_tpg; | 557 | struct se_portal_group *se_tpg; |
591 | spinlock_t device_list_lock; | 558 | struct mutex lun_entry_mutex; |
592 | spinlock_t nacl_sess_lock; | 559 | spinlock_t nacl_sess_lock; |
593 | struct config_group acl_group; | 560 | struct config_group acl_group; |
594 | struct config_group acl_attrib_group; | 561 | struct config_group acl_attrib_group; |
@@ -632,33 +599,37 @@ struct se_ml_stat_grps { | |||
632 | 599 | ||
633 | struct se_lun_acl { | 600 | struct se_lun_acl { |
634 | char initiatorname[TRANSPORT_IQN_LEN]; | 601 | char initiatorname[TRANSPORT_IQN_LEN]; |
635 | u32 mapped_lun; | 602 | u64 mapped_lun; |
636 | struct se_node_acl *se_lun_nacl; | 603 | struct se_node_acl *se_lun_nacl; |
637 | struct se_lun *se_lun; | 604 | struct se_lun *se_lun; |
638 | struct list_head lacl_list; | ||
639 | struct config_group se_lun_group; | 605 | struct config_group se_lun_group; |
640 | struct se_ml_stat_grps ml_stat_grps; | 606 | struct se_ml_stat_grps ml_stat_grps; |
641 | }; | 607 | }; |
642 | 608 | ||
643 | struct se_dev_entry { | 609 | struct se_dev_entry { |
644 | bool def_pr_registered; | ||
645 | /* See transport_lunflags_table */ | 610 | /* See transport_lunflags_table */ |
646 | u32 lun_flags; | 611 | u64 mapped_lun; |
647 | u32 mapped_lun; | ||
648 | u32 total_cmds; | ||
649 | u64 pr_res_key; | 612 | u64 pr_res_key; |
650 | u64 creation_time; | 613 | u64 creation_time; |
614 | u32 lun_flags; | ||
651 | u32 attach_count; | 615 | u32 attach_count; |
652 | u64 read_bytes; | 616 | atomic_long_t total_cmds; |
653 | u64 write_bytes; | 617 | atomic_long_t read_bytes; |
618 | atomic_long_t write_bytes; | ||
654 | atomic_t ua_count; | 619 | atomic_t ua_count; |
655 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 620 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
656 | atomic_t pr_ref_count; | 621 | struct kref pr_kref; |
657 | struct se_lun_acl *se_lun_acl; | 622 | struct completion pr_comp; |
623 | struct se_lun_acl __rcu *se_lun_acl; | ||
658 | spinlock_t ua_lock; | 624 | spinlock_t ua_lock; |
659 | struct se_lun *se_lun; | 625 | struct se_lun __rcu *se_lun; |
626 | #define DEF_PR_REG_ACTIVE 1 | ||
627 | unsigned long deve_flags; | ||
660 | struct list_head alua_port_list; | 628 | struct list_head alua_port_list; |
629 | struct list_head lun_link; | ||
661 | struct list_head ua_list; | 630 | struct list_head ua_list; |
631 | struct hlist_node link; | ||
632 | struct rcu_head rcu_head; | ||
662 | }; | 633 | }; |
663 | 634 | ||
664 | struct se_dev_attrib { | 635 | struct se_dev_attrib { |
@@ -703,25 +674,48 @@ struct se_port_stat_grps { | |||
703 | struct config_group scsi_transport_group; | 674 | struct config_group scsi_transport_group; |
704 | }; | 675 | }; |
705 | 676 | ||
677 | struct scsi_port_stats { | ||
678 | atomic_long_t cmd_pdus; | ||
679 | atomic_long_t tx_data_octets; | ||
680 | atomic_long_t rx_data_octets; | ||
681 | }; | ||
682 | |||
706 | struct se_lun { | 683 | struct se_lun { |
684 | u64 unpacked_lun; | ||
707 | #define SE_LUN_LINK_MAGIC 0xffff7771 | 685 | #define SE_LUN_LINK_MAGIC 0xffff7771 |
708 | u32 lun_link_magic; | 686 | u32 lun_link_magic; |
709 | /* See transport_lun_status_table */ | ||
710 | enum transport_lun_status_table lun_status; | ||
711 | u32 lun_access; | 687 | u32 lun_access; |
712 | u32 lun_flags; | 688 | u32 lun_flags; |
713 | u32 unpacked_lun; | 689 | u32 lun_index; |
690 | |||
691 | /* RELATIVE TARGET PORT IDENTIFER */ | ||
692 | u16 lun_rtpi; | ||
714 | atomic_t lun_acl_count; | 693 | atomic_t lun_acl_count; |
715 | spinlock_t lun_acl_lock; | 694 | struct se_device __rcu *lun_se_dev; |
716 | spinlock_t lun_sep_lock; | 695 | |
717 | struct completion lun_shutdown_comp; | 696 | struct list_head lun_deve_list; |
718 | struct list_head lun_acl_list; | 697 | spinlock_t lun_deve_lock; |
719 | struct se_device *lun_se_dev; | 698 | |
720 | struct se_port *lun_sep; | 699 | /* ALUA state */ |
700 | int lun_tg_pt_secondary_stat; | ||
701 | int lun_tg_pt_secondary_write_md; | ||
702 | atomic_t lun_tg_pt_secondary_offline; | ||
703 | struct mutex lun_tg_pt_md_mutex; | ||
704 | |||
705 | /* ALUA target port group linkage */ | ||
706 | struct list_head lun_tg_pt_gp_link; | ||
707 | struct t10_alua_tg_pt_gp *lun_tg_pt_gp; | ||
708 | spinlock_t lun_tg_pt_gp_lock; | ||
709 | |||
710 | struct se_portal_group *lun_tpg; | ||
711 | struct scsi_port_stats lun_stats; | ||
721 | struct config_group lun_group; | 712 | struct config_group lun_group; |
722 | struct se_port_stat_grps port_stat_grps; | 713 | struct se_port_stat_grps port_stat_grps; |
723 | struct completion lun_ref_comp; | 714 | struct completion lun_ref_comp; |
724 | struct percpu_ref lun_ref; | 715 | struct percpu_ref lun_ref; |
716 | struct list_head lun_dev_link; | ||
717 | struct hlist_node link; | ||
718 | struct rcu_head rcu_head; | ||
725 | }; | 719 | }; |
726 | 720 | ||
727 | struct se_dev_stat_grps { | 721 | struct se_dev_stat_grps { |
@@ -744,7 +738,6 @@ struct se_device { | |||
744 | #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 | 738 | #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 |
745 | #define DF_USING_UDEV_PATH 0x00000008 | 739 | #define DF_USING_UDEV_PATH 0x00000008 |
746 | #define DF_USING_ALIAS 0x00000010 | 740 | #define DF_USING_ALIAS 0x00000010 |
747 | u32 dev_port_count; | ||
748 | /* Physical device queue depth */ | 741 | /* Physical device queue depth */ |
749 | u32 queue_depth; | 742 | u32 queue_depth; |
750 | /* Used for SPC-2 reservations enforce of ISIDs */ | 743 | /* Used for SPC-2 reservations enforce of ISIDs */ |
@@ -761,7 +754,7 @@ struct se_device { | |||
761 | atomic_t dev_ordered_id; | 754 | atomic_t dev_ordered_id; |
762 | atomic_t dev_ordered_sync; | 755 | atomic_t dev_ordered_sync; |
763 | atomic_t dev_qf_count; | 756 | atomic_t dev_qf_count; |
764 | int export_count; | 757 | u32 export_count; |
765 | spinlock_t delayed_cmd_lock; | 758 | spinlock_t delayed_cmd_lock; |
766 | spinlock_t execute_task_lock; | 759 | spinlock_t execute_task_lock; |
767 | spinlock_t dev_reservation_lock; | 760 | spinlock_t dev_reservation_lock; |
@@ -803,12 +796,15 @@ struct se_device { | |||
803 | #define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */ | 796 | #define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */ |
804 | unsigned char udev_path[SE_UDEV_PATH_LEN]; | 797 | unsigned char udev_path[SE_UDEV_PATH_LEN]; |
805 | /* Pointer to template of function pointers for transport */ | 798 | /* Pointer to template of function pointers for transport */ |
806 | struct se_subsystem_api *transport; | 799 | const struct target_backend_ops *transport; |
807 | /* Linked list for struct se_hba struct se_device list */ | 800 | /* Linked list for struct se_hba struct se_device list */ |
808 | struct list_head dev_list; | 801 | struct list_head dev_list; |
809 | struct se_lun xcopy_lun; | 802 | struct se_lun xcopy_lun; |
810 | /* Protection Information */ | 803 | /* Protection Information */ |
811 | int prot_length; | 804 | int prot_length; |
805 | /* For se_lun->lun_se_dev RCU read-side critical access */ | ||
806 | u32 hba_index; | ||
807 | struct rcu_head rcu_head; | ||
812 | }; | 808 | }; |
813 | 809 | ||
814 | struct se_hba { | 810 | struct se_hba { |
@@ -825,33 +821,7 @@ struct se_hba { | |||
825 | spinlock_t device_lock; | 821 | spinlock_t device_lock; |
826 | struct config_group hba_group; | 822 | struct config_group hba_group; |
827 | struct mutex hba_access_mutex; | 823 | struct mutex hba_access_mutex; |
828 | struct se_subsystem_api *transport; | 824 | struct target_backend *backend; |
829 | }; | ||
830 | |||
831 | struct scsi_port_stats { | ||
832 | u64 cmd_pdus; | ||
833 | u64 tx_data_octets; | ||
834 | u64 rx_data_octets; | ||
835 | }; | ||
836 | |||
837 | struct se_port { | ||
838 | /* RELATIVE TARGET PORT IDENTIFER */ | ||
839 | u16 sep_rtpi; | ||
840 | int sep_tg_pt_secondary_stat; | ||
841 | int sep_tg_pt_secondary_write_md; | ||
842 | u32 sep_index; | ||
843 | struct scsi_port_stats sep_stats; | ||
844 | /* Used for ALUA Target Port Groups membership */ | ||
845 | atomic_t sep_tg_pt_secondary_offline; | ||
846 | /* Used for PR ALL_TG_PT=1 */ | ||
847 | atomic_t sep_tg_pt_ref_cnt; | ||
848 | spinlock_t sep_alua_lock; | ||
849 | struct mutex sep_tg_pt_md_mutex; | ||
850 | struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; | ||
851 | struct se_lun *sep_lun; | ||
852 | struct se_portal_group *sep_tpg; | ||
853 | struct list_head sep_alua_list; | ||
854 | struct list_head sep_list; | ||
855 | }; | 825 | }; |
856 | 826 | ||
857 | struct se_tpg_np { | 827 | struct se_tpg_np { |
@@ -860,24 +830,26 @@ struct se_tpg_np { | |||
860 | }; | 830 | }; |
861 | 831 | ||
862 | struct se_portal_group { | 832 | struct se_portal_group { |
863 | /* Type of target portal group, see transport_tpg_type_table */ | 833 | /* |
864 | enum transport_tpg_type_table se_tpg_type; | 834 | * PROTOCOL IDENTIFIER value per SPC4, 7.5.1. |
835 | * | ||
836 | * Negative values can be used by fabric drivers for internal use TPGs. | ||
837 | */ | ||
838 | int proto_id; | ||
865 | /* Number of ACLed Initiator Nodes for this TPG */ | 839 | /* Number of ACLed Initiator Nodes for this TPG */ |
866 | u32 num_node_acls; | 840 | u32 num_node_acls; |
867 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 841 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
868 | atomic_t tpg_pr_ref_count; | 842 | atomic_t tpg_pr_ref_count; |
869 | /* Spinlock for adding/removing ACLed Nodes */ | 843 | /* Spinlock for adding/removing ACLed Nodes */ |
870 | spinlock_t acl_node_lock; | 844 | struct mutex acl_node_mutex; |
871 | /* Spinlock for adding/removing sessions */ | 845 | /* Spinlock for adding/removing sessions */ |
872 | spinlock_t session_lock; | 846 | spinlock_t session_lock; |
873 | spinlock_t tpg_lun_lock; | 847 | struct mutex tpg_lun_mutex; |
874 | /* Pointer to $FABRIC_MOD portal group */ | ||
875 | void *se_tpg_fabric_ptr; | ||
876 | struct list_head se_tpg_node; | 848 | struct list_head se_tpg_node; |
877 | /* linked list for initiator ACL list */ | 849 | /* linked list for initiator ACL list */ |
878 | struct list_head acl_node_list; | 850 | struct list_head acl_node_list; |
879 | struct se_lun **tpg_lun_list; | 851 | struct hlist_head tpg_lun_hlist; |
880 | struct se_lun tpg_virt_lun0; | 852 | struct se_lun *tpg_virt_lun0; |
881 | /* List of TCM sessions associated wth this TPG */ | 853 | /* List of TCM sessions associated wth this TPG */ |
882 | struct list_head tpg_sess_list; | 854 | struct list_head tpg_sess_list; |
883 | /* Pointer to $FABRIC_MOD dependent code */ | 855 | /* Pointer to $FABRIC_MOD dependent code */ |
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h deleted file mode 100644 index b99c01170392..000000000000 --- a/include/target/target_core_configfs.h +++ /dev/null | |||
@@ -1,48 +0,0 @@ | |||
1 | #define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION | ||
2 | |||
3 | #define TARGET_CORE_CONFIG_ROOT "/sys/kernel/config" | ||
4 | |||
5 | #define TARGET_CORE_NAME_MAX_LEN 64 | ||
6 | #define TARGET_FABRIC_NAME_SIZE 32 | ||
7 | |||
8 | struct target_fabric_configfs_template { | ||
9 | struct config_item_type tfc_discovery_cit; | ||
10 | struct config_item_type tfc_wwn_cit; | ||
11 | struct config_item_type tfc_wwn_fabric_stats_cit; | ||
12 | struct config_item_type tfc_tpg_cit; | ||
13 | struct config_item_type tfc_tpg_base_cit; | ||
14 | struct config_item_type tfc_tpg_lun_cit; | ||
15 | struct config_item_type tfc_tpg_port_cit; | ||
16 | struct config_item_type tfc_tpg_port_stat_cit; | ||
17 | struct config_item_type tfc_tpg_np_cit; | ||
18 | struct config_item_type tfc_tpg_np_base_cit; | ||
19 | struct config_item_type tfc_tpg_attrib_cit; | ||
20 | struct config_item_type tfc_tpg_auth_cit; | ||
21 | struct config_item_type tfc_tpg_param_cit; | ||
22 | struct config_item_type tfc_tpg_nacl_cit; | ||
23 | struct config_item_type tfc_tpg_nacl_base_cit; | ||
24 | struct config_item_type tfc_tpg_nacl_attrib_cit; | ||
25 | struct config_item_type tfc_tpg_nacl_auth_cit; | ||
26 | struct config_item_type tfc_tpg_nacl_param_cit; | ||
27 | struct config_item_type tfc_tpg_nacl_stat_cit; | ||
28 | struct config_item_type tfc_tpg_mappedlun_cit; | ||
29 | struct config_item_type tfc_tpg_mappedlun_stat_cit; | ||
30 | }; | ||
31 | |||
32 | struct target_fabric_configfs { | ||
33 | char tf_name[TARGET_FABRIC_NAME_SIZE]; | ||
34 | atomic_t tf_access_cnt; | ||
35 | struct list_head tf_list; | ||
36 | struct config_group tf_group; | ||
37 | struct config_group tf_disc_group; | ||
38 | struct config_group *tf_default_groups[2]; | ||
39 | /* Pointer to fabric's config_item */ | ||
40 | struct config_item *tf_fabric; | ||
41 | /* Passed from fabric modules */ | ||
42 | struct config_item_type *tf_fabric_cit; | ||
43 | /* Pointer to fabric's struct module */ | ||
44 | struct module *tf_module; | ||
45 | struct target_core_fabric_ops tf_ops; | ||
46 | struct target_fabric_configfs_template tf_cit_tmpl; | ||
47 | }; | ||
48 | |||
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 0f4dc3768587..18afef91b447 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -4,20 +4,11 @@ | |||
4 | struct target_core_fabric_ops { | 4 | struct target_core_fabric_ops { |
5 | struct module *module; | 5 | struct module *module; |
6 | const char *name; | 6 | const char *name; |
7 | size_t node_acl_size; | ||
7 | char *(*get_fabric_name)(void); | 8 | char *(*get_fabric_name)(void); |
8 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | ||
9 | char *(*tpg_get_wwn)(struct se_portal_group *); | 9 | char *(*tpg_get_wwn)(struct se_portal_group *); |
10 | u16 (*tpg_get_tag)(struct se_portal_group *); | 10 | u16 (*tpg_get_tag)(struct se_portal_group *); |
11 | u32 (*tpg_get_default_depth)(struct se_portal_group *); | 11 | u32 (*tpg_get_default_depth)(struct se_portal_group *); |
12 | u32 (*tpg_get_pr_transport_id)(struct se_portal_group *, | ||
13 | struct se_node_acl *, | ||
14 | struct t10_pr_registration *, int *, | ||
15 | unsigned char *); | ||
16 | u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *, | ||
17 | struct se_node_acl *, | ||
18 | struct t10_pr_registration *, int *); | ||
19 | char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *, | ||
20 | const char *, u32 *, char **); | ||
21 | int (*tpg_check_demo_mode)(struct se_portal_group *); | 12 | int (*tpg_check_demo_mode)(struct se_portal_group *); |
22 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); | 13 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); |
23 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); | 14 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); |
@@ -36,10 +27,6 @@ struct target_core_fabric_ops { | |||
36 | * WRITE_STRIP and READ_INSERT operations. | 27 | * WRITE_STRIP and READ_INSERT operations. |
37 | */ | 28 | */ |
38 | int (*tpg_check_prot_fabric_only)(struct se_portal_group *); | 29 | int (*tpg_check_prot_fabric_only)(struct se_portal_group *); |
39 | struct se_node_acl *(*tpg_alloc_fabric_acl)( | ||
40 | struct se_portal_group *); | ||
41 | void (*tpg_release_fabric_acl)(struct se_portal_group *, | ||
42 | struct se_node_acl *); | ||
43 | u32 (*tpg_get_inst_index)(struct se_portal_group *); | 30 | u32 (*tpg_get_inst_index)(struct se_portal_group *); |
44 | /* | 31 | /* |
45 | * Optional to release struct se_cmd and fabric dependent allocated | 32 | * Optional to release struct se_cmd and fabric dependent allocated |
@@ -50,7 +37,6 @@ struct target_core_fabric_ops { | |||
50 | */ | 37 | */ |
51 | int (*check_stop_free)(struct se_cmd *); | 38 | int (*check_stop_free)(struct se_cmd *); |
52 | void (*release_cmd)(struct se_cmd *); | 39 | void (*release_cmd)(struct se_cmd *); |
53 | void (*put_session)(struct se_session *); | ||
54 | /* | 40 | /* |
55 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. | 41 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. |
56 | */ | 42 | */ |
@@ -66,7 +52,6 @@ struct target_core_fabric_ops { | |||
66 | int (*write_pending)(struct se_cmd *); | 52 | int (*write_pending)(struct se_cmd *); |
67 | int (*write_pending_status)(struct se_cmd *); | 53 | int (*write_pending_status)(struct se_cmd *); |
68 | void (*set_default_node_attributes)(struct se_node_acl *); | 54 | void (*set_default_node_attributes)(struct se_node_acl *); |
69 | u32 (*get_task_tag)(struct se_cmd *); | ||
70 | int (*get_cmd_state)(struct se_cmd *); | 55 | int (*get_cmd_state)(struct se_cmd *); |
71 | int (*queue_data_in)(struct se_cmd *); | 56 | int (*queue_data_in)(struct se_cmd *); |
72 | int (*queue_status)(struct se_cmd *); | 57 | int (*queue_status)(struct se_cmd *); |
@@ -88,9 +73,8 @@ struct target_core_fabric_ops { | |||
88 | struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, | 73 | struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, |
89 | struct config_group *, const char *); | 74 | struct config_group *, const char *); |
90 | void (*fabric_drop_np)(struct se_tpg_np *); | 75 | void (*fabric_drop_np)(struct se_tpg_np *); |
91 | struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, | 76 | int (*fabric_init_nodeacl)(struct se_node_acl *, const char *); |
92 | struct config_group *, const char *); | 77 | void (*fabric_cleanup_nodeacl)(struct se_node_acl *); |
93 | void (*fabric_drop_nodeacl)(struct se_node_acl *); | ||
94 | 78 | ||
95 | struct configfs_attribute **tfc_discovery_attrs; | 79 | struct configfs_attribute **tfc_discovery_attrs; |
96 | struct configfs_attribute **tfc_wwn_attrs; | 80 | struct configfs_attribute **tfc_wwn_attrs; |
@@ -132,16 +116,16 @@ void transport_deregister_session(struct se_session *); | |||
132 | void transport_init_se_cmd(struct se_cmd *, | 116 | void transport_init_se_cmd(struct se_cmd *, |
133 | const struct target_core_fabric_ops *, | 117 | const struct target_core_fabric_ops *, |
134 | struct se_session *, u32, int, int, unsigned char *); | 118 | struct se_session *, u32, int, int, unsigned char *); |
135 | sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32); | 119 | sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64); |
136 | sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); | 120 | sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); |
137 | int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, | 121 | int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, |
138 | unsigned char *, unsigned char *, u32, u32, int, int, int, | 122 | unsigned char *, unsigned char *, u64, u32, int, int, int, |
139 | struct scatterlist *, u32, struct scatterlist *, u32, | 123 | struct scatterlist *, u32, struct scatterlist *, u32, |
140 | struct scatterlist *, u32); | 124 | struct scatterlist *, u32); |
141 | int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, | 125 | int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, |
142 | unsigned char *, u32, u32, int, int, int); | 126 | unsigned char *, u64, u32, int, int, int); |
143 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | 127 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, |
144 | unsigned char *sense, u32 unpacked_lun, | 128 | unsigned char *sense, u64 unpacked_lun, |
145 | void *fabric_tmr_ptr, unsigned char tm_type, | 129 | void *fabric_tmr_ptr, unsigned char tm_type, |
146 | gfp_t, unsigned int, int); | 130 | gfp_t, unsigned int, int); |
147 | int transport_handle_cdb_direct(struct se_cmd *); | 131 | int transport_handle_cdb_direct(struct se_cmd *); |
@@ -155,8 +139,8 @@ bool transport_wait_for_tasks(struct se_cmd *); | |||
155 | int transport_check_aborted_status(struct se_cmd *, int); | 139 | int transport_check_aborted_status(struct se_cmd *, int); |
156 | int transport_send_check_condition_and_sense(struct se_cmd *, | 140 | int transport_send_check_condition_and_sense(struct se_cmd *, |
157 | sense_reason_t, int); | 141 | sense_reason_t, int); |
158 | int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); | 142 | int target_get_sess_cmd(struct se_cmd *, bool); |
159 | int target_put_sess_cmd(struct se_session *, struct se_cmd *); | 143 | int target_put_sess_cmd(struct se_cmd *); |
160 | void target_sess_cmd_list_set_waiting(struct se_session *); | 144 | void target_sess_cmd_list_set_waiting(struct se_session *); |
161 | void target_wait_for_sess_cmds(struct se_session *); | 145 | void target_wait_for_sess_cmds(struct se_session *); |
162 | 146 | ||
@@ -167,52 +151,19 @@ void core_tmr_release_req(struct se_tmr_req *); | |||
167 | int transport_generic_handle_tmr(struct se_cmd *); | 151 | int transport_generic_handle_tmr(struct se_cmd *); |
168 | void transport_generic_request_failure(struct se_cmd *, sense_reason_t); | 152 | void transport_generic_request_failure(struct se_cmd *, sense_reason_t); |
169 | void __target_execute_cmd(struct se_cmd *); | 153 | void __target_execute_cmd(struct se_cmd *); |
170 | int transport_lookup_tmr_lun(struct se_cmd *, u32); | 154 | int transport_lookup_tmr_lun(struct se_cmd *, u64); |
171 | 155 | ||
172 | struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, | 156 | struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, |
173 | unsigned char *); | 157 | unsigned char *); |
174 | struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, | 158 | struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, |
175 | unsigned char *); | 159 | unsigned char *); |
176 | void core_tpg_clear_object_luns(struct se_portal_group *); | ||
177 | struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *, | ||
178 | struct se_node_acl *, const char *, u32); | ||
179 | int core_tpg_del_initiator_node_acl(struct se_portal_group *, | ||
180 | struct se_node_acl *, int); | ||
181 | int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, | 160 | int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, |
182 | unsigned char *, u32, int); | 161 | unsigned char *, u32, int); |
183 | int core_tpg_set_initiator_node_tag(struct se_portal_group *, | 162 | int core_tpg_set_initiator_node_tag(struct se_portal_group *, |
184 | struct se_node_acl *, const char *); | 163 | struct se_node_acl *, const char *); |
185 | int core_tpg_register(const struct target_core_fabric_ops *, | 164 | int core_tpg_register(struct se_wwn *, struct se_portal_group *, int); |
186 | struct se_wwn *, struct se_portal_group *, void *, int); | ||
187 | int core_tpg_deregister(struct se_portal_group *); | 165 | int core_tpg_deregister(struct se_portal_group *); |
188 | 166 | ||
189 | /* SAS helpers */ | ||
190 | u8 sas_get_fabric_proto_ident(struct se_portal_group *); | ||
191 | u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, | ||
192 | struct t10_pr_registration *, int *, unsigned char *); | ||
193 | u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, | ||
194 | struct t10_pr_registration *, int *); | ||
195 | char *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *, | ||
196 | u32 *, char **); | ||
197 | |||
198 | /* FC helpers */ | ||
199 | u8 fc_get_fabric_proto_ident(struct se_portal_group *); | ||
200 | u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, | ||
201 | struct t10_pr_registration *, int *, unsigned char *); | ||
202 | u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, | ||
203 | struct t10_pr_registration *, int *); | ||
204 | char *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *, | ||
205 | u32 *, char **); | ||
206 | |||
207 | /* iSCSI helpers */ | ||
208 | u8 iscsi_get_fabric_proto_ident(struct se_portal_group *); | ||
209 | u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, | ||
210 | struct t10_pr_registration *, int *, unsigned char *); | ||
211 | u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, | ||
212 | struct t10_pr_registration *, int *); | ||
213 | char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *, | ||
214 | u32 *, char **); | ||
215 | |||
216 | /* | 167 | /* |
217 | * The LIO target core uses DMA_TO_DEVICE to mean that data is going | 168 | * The LIO target core uses DMA_TO_DEVICE to mean that data is going |
218 | * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean | 169 | * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean |
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index dfe6ec17c0a5..1ad33e555805 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c | |||
@@ -19,7 +19,7 @@ | |||
19 | static struct crypto_shash *crct10dif_tfm; | 19 | static struct crypto_shash *crct10dif_tfm; |
20 | static struct static_key crct10dif_fallback __read_mostly; | 20 | static struct static_key crct10dif_fallback __read_mostly; |
21 | 21 | ||
22 | __u16 crc_t10dif(const unsigned char *buffer, size_t len) | 22 | __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) |
23 | { | 23 | { |
24 | struct { | 24 | struct { |
25 | struct shash_desc shash; | 25 | struct shash_desc shash; |
@@ -28,17 +28,23 @@ __u16 crc_t10dif(const unsigned char *buffer, size_t len) | |||
28 | int err; | 28 | int err; |
29 | 29 | ||
30 | if (static_key_false(&crct10dif_fallback)) | 30 | if (static_key_false(&crct10dif_fallback)) |
31 | return crc_t10dif_generic(0, buffer, len); | 31 | return crc_t10dif_generic(crc, buffer, len); |
32 | 32 | ||
33 | desc.shash.tfm = crct10dif_tfm; | 33 | desc.shash.tfm = crct10dif_tfm; |
34 | desc.shash.flags = 0; | 34 | desc.shash.flags = 0; |
35 | *(__u16 *)desc.ctx = 0; | 35 | *(__u16 *)desc.ctx = crc; |
36 | 36 | ||
37 | err = crypto_shash_update(&desc.shash, buffer, len); | 37 | err = crypto_shash_update(&desc.shash, buffer, len); |
38 | BUG_ON(err); | 38 | BUG_ON(err); |
39 | 39 | ||
40 | return *(__u16 *)desc.ctx; | 40 | return *(__u16 *)desc.ctx; |
41 | } | 41 | } |
42 | EXPORT_SYMBOL(crc_t10dif_update); | ||
43 | |||
44 | __u16 crc_t10dif(const unsigned char *buffer, size_t len) | ||
45 | { | ||
46 | return crc_t10dif_update(0, buffer, len); | ||
47 | } | ||
42 | EXPORT_SYMBOL(crc_t10dif); | 48 | EXPORT_SYMBOL(crc_t10dif); |
43 | 49 | ||
44 | static int __init crc_t10dif_mod_init(void) | 50 | static int __init crc_t10dif_mod_init(void) |