diff options
50 files changed, 1687 insertions, 2273 deletions
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py index 2b47704f75cb..2ba71cea0172 100755 --- a/Documentation/target/tcm_mod_builder.py +++ b/Documentation/target/tcm_mod_builder.py | |||
@@ -237,8 +237,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
237 | buf += "#include \"" + fabric_mod_name + "_base.h\"\n" | 237 | buf += "#include \"" + fabric_mod_name + "_base.h\"\n" |
238 | buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" | 238 | buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" |
239 | 239 | ||
240 | buf += "/* Local pointer to allocated TCM configfs fabric module */\n" | 240 | buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n" |
241 | buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n" | ||
242 | 241 | ||
243 | buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" | 242 | buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" |
244 | buf += " struct se_portal_group *se_tpg,\n" | 243 | buf += " struct se_portal_group *se_tpg,\n" |
@@ -309,8 +308,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
309 | buf += " }\n" | 308 | buf += " }\n" |
310 | buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" | 309 | buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" |
311 | buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" | 310 | buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" |
312 | buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n" | 311 | buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" |
313 | buf += " &tpg->se_tpg, (void *)tpg,\n" | 312 | buf += " &tpg->se_tpg, tpg,\n" |
314 | buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" | 313 | buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" |
315 | buf += " if (ret < 0) {\n" | 314 | buf += " if (ret < 0) {\n" |
316 | buf += " kfree(tpg);\n" | 315 | buf += " kfree(tpg);\n" |
@@ -370,7 +369,10 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
370 | buf += " NULL,\n" | 369 | buf += " NULL,\n" |
371 | buf += "};\n\n" | 370 | buf += "};\n\n" |
372 | 371 | ||
373 | buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" | 372 | buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" |
373 | buf += " .module = THIS_MODULE,\n" | ||
374 | buf += " .name = " + fabric_mod_name + ",\n" | ||
375 | buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" | ||
374 | buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" | 376 | buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" |
375 | buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" | 377 | buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" |
376 | buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" | 378 | buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" |
@@ -413,75 +415,18 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): | |||
413 | buf += " .fabric_drop_np = NULL,\n" | 415 | buf += " .fabric_drop_np = NULL,\n" |
414 | buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" | 416 | buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" |
415 | buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" | 417 | buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" |
416 | buf += "};\n\n" | 418 | buf += "\n" |
417 | 419 | buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" | |
418 | buf += "static int " + fabric_mod_name + "_register_configfs(void)\n" | ||
419 | buf += "{\n" | ||
420 | buf += " struct target_fabric_configfs *fabric;\n" | ||
421 | buf += " int ret;\n\n" | ||
422 | buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" | ||
423 | buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" | ||
424 | buf += " utsname()->machine);\n" | ||
425 | buf += " /*\n" | ||
426 | buf += " * Register the top level struct config_item_type with TCM core\n" | ||
427 | buf += " */\n" | ||
428 | buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n" | ||
429 | buf += " if (IS_ERR(fabric)) {\n" | ||
430 | buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" | ||
431 | buf += " return PTR_ERR(fabric);\n" | ||
432 | buf += " }\n" | ||
433 | buf += " /*\n" | ||
434 | buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n" | ||
435 | buf += " */\n" | ||
436 | buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n" | ||
437 | buf += " /*\n" | ||
438 | buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" | ||
439 | buf += " */\n" | ||
440 | buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" | ||
441 | buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n" | ||
442 | buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n" | ||
443 | buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n" | ||
444 | buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n" | ||
445 | buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" | ||
446 | buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" | ||
447 | buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" | ||
448 | buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" | ||
449 | buf += " /*\n" | ||
450 | buf += " * Register the fabric for use within TCM\n" | ||
451 | buf += " */\n" | ||
452 | buf += " ret = target_fabric_configfs_register(fabric);\n" | ||
453 | buf += " if (ret < 0) {\n" | ||
454 | buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n" | ||
455 | buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n" | ||
456 | buf += " return ret;\n" | ||
457 | buf += " }\n" | ||
458 | buf += " /*\n" | ||
459 | buf += " * Setup our local pointer to *fabric\n" | ||
460 | buf += " */\n" | ||
461 | buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n" | ||
462 | buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n" | ||
463 | buf += " return 0;\n" | ||
464 | buf += "};\n\n" | ||
465 | buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n" | ||
466 | buf += "{\n" | ||
467 | buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n" | ||
468 | buf += " return;\n\n" | ||
469 | buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n" | ||
470 | buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n" | ||
471 | buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n" | ||
472 | buf += "};\n\n" | 420 | buf += "};\n\n" |
473 | 421 | ||
474 | buf += "static int __init " + fabric_mod_name + "_init(void)\n" | 422 | buf += "static int __init " + fabric_mod_name + "_init(void)\n" |
475 | buf += "{\n" | 423 | buf += "{\n" |
476 | buf += " int ret;\n\n" | 424 | buf += " return target_register_template(" + fabric_mod_name + "_ops);\n" |
477 | buf += " ret = " + fabric_mod_name + "_register_configfs();\n" | ||
478 | buf += " if (ret < 0)\n" | ||
479 | buf += " return ret;\n\n" | ||
480 | buf += " return 0;\n" | ||
481 | buf += "};\n\n" | 425 | buf += "};\n\n" |
426 | |||
482 | buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" | 427 | buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" |
483 | buf += "{\n" | 428 | buf += "{\n" |
484 | buf += " " + fabric_mod_name + "_deregister_configfs();\n" | 429 | buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n" |
485 | buf += "};\n\n" | 430 | buf += "};\n\n" |
486 | 431 | ||
487 | buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" | 432 | buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" |
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt index 5518465290bf..43e94ea6d2ca 100644 --- a/Documentation/target/tcmu-design.txt +++ b/Documentation/target/tcmu-design.txt | |||
@@ -138,27 +138,40 @@ signals the kernel via a 4-byte write(). When cmd_head equals | |||
138 | cmd_tail, the ring is empty -- no commands are currently waiting to be | 138 | cmd_tail, the ring is empty -- no commands are currently waiting to be |
139 | processed by userspace. | 139 | processed by userspace. |
140 | 140 | ||
141 | TCMU commands start with a common header containing "len_op", a 32-bit | 141 | TCMU commands are 8-byte aligned. They start with a common header |
142 | value that stores the length, as well as the opcode in the lowest | 142 | containing "len_op", a 32-bit value that stores the length, as well as |
143 | unused bits. Currently only two opcodes are defined, TCMU_OP_PAD and | 143 | the opcode in the lowest unused bits. It also contains cmd_id and |
144 | TCMU_OP_CMD. When userspace encounters a command with PAD opcode, it | 144 | flags fields for setting by the kernel (kflags) and userspace |
145 | should skip ahead by the bytes in "length". (The kernel inserts PAD | 145 | (uflags). |
146 | entries to ensure each CMD entry fits contigously into the circular | 146 | |
147 | buffer.) | 147 | Currently only two opcodes are defined, TCMU_OP_CMD and TCMU_OP_PAD. |
148 | 148 | ||
149 | When userspace handles a CMD, it finds the SCSI CDB (Command Data | 149 | When the opcode is CMD, the entry in the command ring is a struct |
150 | Block) via tcmu_cmd_entry.req.cdb_off. This is an offset from the | 150 | tcmu_cmd_entry. Userspace finds the SCSI CDB (Command Data Block) via |
151 | start of the overall shared memory region, not the entry. The data | 151 | tcmu_cmd_entry.req.cdb_off. This is an offset from the start of the |
152 | in/out buffers are accessible via tht req.iov[] array. Note that | 152 | overall shared memory region, not the entry. The data in/out buffers |
153 | each iov.iov_base is also an offset from the start of the region. | 153 | are accessible via tht req.iov[] array. iov_cnt contains the number of |
154 | 154 | entries in iov[] needed to describe either the Data-In or Data-Out | |
155 | TCMU currently does not support BIDI operations. | 155 | buffers. For bidirectional commands, iov_cnt specifies how many iovec |
156 | entries cover the Data-Out area, and iov_bidi_count specifies how many | ||
157 | iovec entries immediately after that in iov[] cover the Data-In | ||
158 | area. Just like other fields, iov.iov_base is an offset from the start | ||
159 | of the region. | ||
156 | 160 | ||
157 | When completing a command, userspace sets rsp.scsi_status, and | 161 | When completing a command, userspace sets rsp.scsi_status, and |
158 | rsp.sense_buffer if necessary. Userspace then increments | 162 | rsp.sense_buffer if necessary. Userspace then increments |
159 | mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the | 163 | mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the |
160 | kernel via the UIO method, a 4-byte write to the file descriptor. | 164 | kernel via the UIO method, a 4-byte write to the file descriptor. |
161 | 165 | ||
166 | When the opcode is PAD, userspace only updates cmd_tail as above -- | ||
167 | it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry | ||
168 | is contiguous within the command ring.) | ||
169 | |||
170 | More opcodes may be added in the future. If userspace encounters an | ||
171 | opcode it does not handle, it must set UNKNOWN_OP bit (bit 0) in | ||
172 | hdr.uflags, update cmd_tail, and proceed with processing additional | ||
173 | commands, if any. | ||
174 | |||
162 | The Data Area: | 175 | The Data Area: |
163 | 176 | ||
164 | This is shared-memory space after the command ring. The organization | 177 | This is shared-memory space after the command ring. The organization |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 075b19cc78e8..327529ee85eb 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -76,12 +76,12 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) | |||
76 | static void | 76 | static void |
77 | isert_qp_event_callback(struct ib_event *e, void *context) | 77 | isert_qp_event_callback(struct ib_event *e, void *context) |
78 | { | 78 | { |
79 | struct isert_conn *isert_conn = (struct isert_conn *)context; | 79 | struct isert_conn *isert_conn = context; |
80 | 80 | ||
81 | isert_err("conn %p event: %d\n", isert_conn, e->event); | 81 | isert_err("conn %p event: %d\n", isert_conn, e->event); |
82 | switch (e->event) { | 82 | switch (e->event) { |
83 | case IB_EVENT_COMM_EST: | 83 | case IB_EVENT_COMM_EST: |
84 | rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); | 84 | rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); |
85 | break; | 85 | break; |
86 | case IB_EVENT_QP_LAST_WQE_REACHED: | 86 | case IB_EVENT_QP_LAST_WQE_REACHED: |
87 | isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); | 87 | isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); |
@@ -107,13 +107,12 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) | |||
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | static int | 110 | static struct isert_comp * |
111 | isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) | 111 | isert_comp_get(struct isert_conn *isert_conn) |
112 | { | 112 | { |
113 | struct isert_device *device = isert_conn->conn_device; | 113 | struct isert_device *device = isert_conn->device; |
114 | struct ib_qp_init_attr attr; | ||
115 | struct isert_comp *comp; | 114 | struct isert_comp *comp; |
116 | int ret, i, min = 0; | 115 | int i, min = 0; |
117 | 116 | ||
118 | mutex_lock(&device_list_mutex); | 117 | mutex_lock(&device_list_mutex); |
119 | for (i = 0; i < device->comps_used; i++) | 118 | for (i = 0; i < device->comps_used; i++) |
@@ -122,9 +121,30 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) | |||
122 | min = i; | 121 | min = i; |
123 | comp = &device->comps[min]; | 122 | comp = &device->comps[min]; |
124 | comp->active_qps++; | 123 | comp->active_qps++; |
124 | mutex_unlock(&device_list_mutex); | ||
125 | |||
125 | isert_info("conn %p, using comp %p min_index: %d\n", | 126 | isert_info("conn %p, using comp %p min_index: %d\n", |
126 | isert_conn, comp, min); | 127 | isert_conn, comp, min); |
128 | |||
129 | return comp; | ||
130 | } | ||
131 | |||
132 | static void | ||
133 | isert_comp_put(struct isert_comp *comp) | ||
134 | { | ||
135 | mutex_lock(&device_list_mutex); | ||
136 | comp->active_qps--; | ||
127 | mutex_unlock(&device_list_mutex); | 137 | mutex_unlock(&device_list_mutex); |
138 | } | ||
139 | |||
140 | static struct ib_qp * | ||
141 | isert_create_qp(struct isert_conn *isert_conn, | ||
142 | struct isert_comp *comp, | ||
143 | struct rdma_cm_id *cma_id) | ||
144 | { | ||
145 | struct isert_device *device = isert_conn->device; | ||
146 | struct ib_qp_init_attr attr; | ||
147 | int ret; | ||
128 | 148 | ||
129 | memset(&attr, 0, sizeof(struct ib_qp_init_attr)); | 149 | memset(&attr, 0, sizeof(struct ib_qp_init_attr)); |
130 | attr.event_handler = isert_qp_event_callback; | 150 | attr.event_handler = isert_qp_event_callback; |
@@ -149,19 +169,31 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) | |||
149 | if (device->pi_capable) | 169 | if (device->pi_capable) |
150 | attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; | 170 | attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; |
151 | 171 | ||
152 | ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); | 172 | ret = rdma_create_qp(cma_id, device->pd, &attr); |
153 | if (ret) { | 173 | if (ret) { |
154 | isert_err("rdma_create_qp failed for cma_id %d\n", ret); | 174 | isert_err("rdma_create_qp failed for cma_id %d\n", ret); |
175 | return ERR_PTR(ret); | ||
176 | } | ||
177 | |||
178 | return cma_id->qp; | ||
179 | } | ||
180 | |||
181 | static int | ||
182 | isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) | ||
183 | { | ||
184 | struct isert_comp *comp; | ||
185 | int ret; | ||
186 | |||
187 | comp = isert_comp_get(isert_conn); | ||
188 | isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); | ||
189 | if (IS_ERR(isert_conn->qp)) { | ||
190 | ret = PTR_ERR(isert_conn->qp); | ||
155 | goto err; | 191 | goto err; |
156 | } | 192 | } |
157 | isert_conn->conn_qp = cma_id->qp; | ||
158 | 193 | ||
159 | return 0; | 194 | return 0; |
160 | err: | 195 | err: |
161 | mutex_lock(&device_list_mutex); | 196 | isert_comp_put(comp); |
162 | comp->active_qps--; | ||
163 | mutex_unlock(&device_list_mutex); | ||
164 | |||
165 | return ret; | 197 | return ret; |
166 | } | 198 | } |
167 | 199 | ||
@@ -174,18 +206,19 @@ isert_cq_event_callback(struct ib_event *e, void *context) | |||
174 | static int | 206 | static int |
175 | isert_alloc_rx_descriptors(struct isert_conn *isert_conn) | 207 | isert_alloc_rx_descriptors(struct isert_conn *isert_conn) |
176 | { | 208 | { |
177 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 209 | struct isert_device *device = isert_conn->device; |
210 | struct ib_device *ib_dev = device->ib_device; | ||
178 | struct iser_rx_desc *rx_desc; | 211 | struct iser_rx_desc *rx_desc; |
179 | struct ib_sge *rx_sg; | 212 | struct ib_sge *rx_sg; |
180 | u64 dma_addr; | 213 | u64 dma_addr; |
181 | int i, j; | 214 | int i, j; |
182 | 215 | ||
183 | isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * | 216 | isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * |
184 | sizeof(struct iser_rx_desc), GFP_KERNEL); | 217 | sizeof(struct iser_rx_desc), GFP_KERNEL); |
185 | if (!isert_conn->conn_rx_descs) | 218 | if (!isert_conn->rx_descs) |
186 | goto fail; | 219 | goto fail; |
187 | 220 | ||
188 | rx_desc = isert_conn->conn_rx_descs; | 221 | rx_desc = isert_conn->rx_descs; |
189 | 222 | ||
190 | for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { | 223 | for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { |
191 | dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, | 224 | dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, |
@@ -198,21 +231,21 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) | |||
198 | rx_sg = &rx_desc->rx_sg; | 231 | rx_sg = &rx_desc->rx_sg; |
199 | rx_sg->addr = rx_desc->dma_addr; | 232 | rx_sg->addr = rx_desc->dma_addr; |
200 | rx_sg->length = ISER_RX_PAYLOAD_SIZE; | 233 | rx_sg->length = ISER_RX_PAYLOAD_SIZE; |
201 | rx_sg->lkey = isert_conn->conn_mr->lkey; | 234 | rx_sg->lkey = device->mr->lkey; |
202 | } | 235 | } |
203 | 236 | ||
204 | isert_conn->conn_rx_desc_head = 0; | 237 | isert_conn->rx_desc_head = 0; |
205 | 238 | ||
206 | return 0; | 239 | return 0; |
207 | 240 | ||
208 | dma_map_fail: | 241 | dma_map_fail: |
209 | rx_desc = isert_conn->conn_rx_descs; | 242 | rx_desc = isert_conn->rx_descs; |
210 | for (j = 0; j < i; j++, rx_desc++) { | 243 | for (j = 0; j < i; j++, rx_desc++) { |
211 | ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, | 244 | ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, |
212 | ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); | 245 | ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); |
213 | } | 246 | } |
214 | kfree(isert_conn->conn_rx_descs); | 247 | kfree(isert_conn->rx_descs); |
215 | isert_conn->conn_rx_descs = NULL; | 248 | isert_conn->rx_descs = NULL; |
216 | fail: | 249 | fail: |
217 | isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); | 250 | isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); |
218 | 251 | ||
@@ -222,59 +255,51 @@ fail: | |||
222 | static void | 255 | static void |
223 | isert_free_rx_descriptors(struct isert_conn *isert_conn) | 256 | isert_free_rx_descriptors(struct isert_conn *isert_conn) |
224 | { | 257 | { |
225 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 258 | struct ib_device *ib_dev = isert_conn->device->ib_device; |
226 | struct iser_rx_desc *rx_desc; | 259 | struct iser_rx_desc *rx_desc; |
227 | int i; | 260 | int i; |
228 | 261 | ||
229 | if (!isert_conn->conn_rx_descs) | 262 | if (!isert_conn->rx_descs) |
230 | return; | 263 | return; |
231 | 264 | ||
232 | rx_desc = isert_conn->conn_rx_descs; | 265 | rx_desc = isert_conn->rx_descs; |
233 | for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { | 266 | for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { |
234 | ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, | 267 | ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, |
235 | ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); | 268 | ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); |
236 | } | 269 | } |
237 | 270 | ||
238 | kfree(isert_conn->conn_rx_descs); | 271 | kfree(isert_conn->rx_descs); |
239 | isert_conn->conn_rx_descs = NULL; | 272 | isert_conn->rx_descs = NULL; |
240 | } | 273 | } |
241 | 274 | ||
242 | static void isert_cq_work(struct work_struct *); | 275 | static void isert_cq_work(struct work_struct *); |
243 | static void isert_cq_callback(struct ib_cq *, void *); | 276 | static void isert_cq_callback(struct ib_cq *, void *); |
244 | 277 | ||
245 | static int | 278 | static void |
246 | isert_create_device_ib_res(struct isert_device *device) | 279 | isert_free_comps(struct isert_device *device) |
247 | { | 280 | { |
248 | struct ib_device *ib_dev = device->ib_device; | 281 | int i; |
249 | struct ib_device_attr *dev_attr; | ||
250 | int ret = 0, i; | ||
251 | int max_cqe; | ||
252 | |||
253 | dev_attr = &device->dev_attr; | ||
254 | ret = isert_query_device(ib_dev, dev_attr); | ||
255 | if (ret) | ||
256 | return ret; | ||
257 | 282 | ||
258 | max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe); | 283 | for (i = 0; i < device->comps_used; i++) { |
284 | struct isert_comp *comp = &device->comps[i]; | ||
259 | 285 | ||
260 | /* asign function handlers */ | 286 | if (comp->cq) { |
261 | if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && | 287 | cancel_work_sync(&comp->work); |
262 | dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { | 288 | ib_destroy_cq(comp->cq); |
263 | device->use_fastreg = 1; | 289 | } |
264 | device->reg_rdma_mem = isert_reg_rdma; | ||
265 | device->unreg_rdma_mem = isert_unreg_rdma; | ||
266 | } else { | ||
267 | device->use_fastreg = 0; | ||
268 | device->reg_rdma_mem = isert_map_rdma; | ||
269 | device->unreg_rdma_mem = isert_unmap_cmd; | ||
270 | } | 290 | } |
291 | kfree(device->comps); | ||
292 | } | ||
271 | 293 | ||
272 | /* Check signature cap */ | 294 | static int |
273 | device->pi_capable = dev_attr->device_cap_flags & | 295 | isert_alloc_comps(struct isert_device *device, |
274 | IB_DEVICE_SIGNATURE_HANDOVER ? true : false; | 296 | struct ib_device_attr *attr) |
297 | { | ||
298 | int i, max_cqe, ret = 0; | ||
275 | 299 | ||
276 | device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), | 300 | device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), |
277 | device->ib_device->num_comp_vectors)); | 301 | device->ib_device->num_comp_vectors)); |
302 | |||
278 | isert_info("Using %d CQs, %s supports %d vectors support " | 303 | isert_info("Using %d CQs, %s supports %d vectors support " |
279 | "Fast registration %d pi_capable %d\n", | 304 | "Fast registration %d pi_capable %d\n", |
280 | device->comps_used, device->ib_device->name, | 305 | device->comps_used, device->ib_device->name, |
@@ -288,6 +313,8 @@ isert_create_device_ib_res(struct isert_device *device) | |||
288 | return -ENOMEM; | 313 | return -ENOMEM; |
289 | } | 314 | } |
290 | 315 | ||
316 | max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe); | ||
317 | |||
291 | for (i = 0; i < device->comps_used; i++) { | 318 | for (i = 0; i < device->comps_used; i++) { |
292 | struct isert_comp *comp = &device->comps[i]; | 319 | struct isert_comp *comp = &device->comps[i]; |
293 | 320 | ||
@@ -299,6 +326,7 @@ isert_create_device_ib_res(struct isert_device *device) | |||
299 | (void *)comp, | 326 | (void *)comp, |
300 | max_cqe, i); | 327 | max_cqe, i); |
301 | if (IS_ERR(comp->cq)) { | 328 | if (IS_ERR(comp->cq)) { |
329 | isert_err("Unable to allocate cq\n"); | ||
302 | ret = PTR_ERR(comp->cq); | 330 | ret = PTR_ERR(comp->cq); |
303 | comp->cq = NULL; | 331 | comp->cq = NULL; |
304 | goto out_cq; | 332 | goto out_cq; |
@@ -310,40 +338,79 @@ isert_create_device_ib_res(struct isert_device *device) | |||
310 | } | 338 | } |
311 | 339 | ||
312 | return 0; | 340 | return 0; |
313 | |||
314 | out_cq: | 341 | out_cq: |
315 | for (i = 0; i < device->comps_used; i++) { | 342 | isert_free_comps(device); |
316 | struct isert_comp *comp = &device->comps[i]; | 343 | return ret; |
344 | } | ||
317 | 345 | ||
318 | if (comp->cq) { | 346 | static int |
319 | cancel_work_sync(&comp->work); | 347 | isert_create_device_ib_res(struct isert_device *device) |
320 | ib_destroy_cq(comp->cq); | 348 | { |
321 | } | 349 | struct ib_device_attr *dev_attr; |
350 | int ret; | ||
351 | |||
352 | dev_attr = &device->dev_attr; | ||
353 | ret = isert_query_device(device->ib_device, dev_attr); | ||
354 | if (ret) | ||
355 | return ret; | ||
356 | |||
357 | /* asign function handlers */ | ||
358 | if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && | ||
359 | dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { | ||
360 | device->use_fastreg = 1; | ||
361 | device->reg_rdma_mem = isert_reg_rdma; | ||
362 | device->unreg_rdma_mem = isert_unreg_rdma; | ||
363 | } else { | ||
364 | device->use_fastreg = 0; | ||
365 | device->reg_rdma_mem = isert_map_rdma; | ||
366 | device->unreg_rdma_mem = isert_unmap_cmd; | ||
322 | } | 367 | } |
323 | kfree(device->comps); | ||
324 | 368 | ||
369 | ret = isert_alloc_comps(device, dev_attr); | ||
370 | if (ret) | ||
371 | return ret; | ||
372 | |||
373 | device->pd = ib_alloc_pd(device->ib_device); | ||
374 | if (IS_ERR(device->pd)) { | ||
375 | ret = PTR_ERR(device->pd); | ||
376 | isert_err("failed to allocate pd, device %p, ret=%d\n", | ||
377 | device, ret); | ||
378 | goto out_cq; | ||
379 | } | ||
380 | |||
381 | device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE); | ||
382 | if (IS_ERR(device->mr)) { | ||
383 | ret = PTR_ERR(device->mr); | ||
384 | isert_err("failed to create dma mr, device %p, ret=%d\n", | ||
385 | device, ret); | ||
386 | goto out_mr; | ||
387 | } | ||
388 | |||
389 | /* Check signature cap */ | ||
390 | device->pi_capable = dev_attr->device_cap_flags & | ||
391 | IB_DEVICE_SIGNATURE_HANDOVER ? true : false; | ||
392 | |||
393 | return 0; | ||
394 | |||
395 | out_mr: | ||
396 | ib_dealloc_pd(device->pd); | ||
397 | out_cq: | ||
398 | isert_free_comps(device); | ||
325 | return ret; | 399 | return ret; |
326 | } | 400 | } |
327 | 401 | ||
328 | static void | 402 | static void |
329 | isert_free_device_ib_res(struct isert_device *device) | 403 | isert_free_device_ib_res(struct isert_device *device) |
330 | { | 404 | { |
331 | int i; | ||
332 | |||
333 | isert_info("device %p\n", device); | 405 | isert_info("device %p\n", device); |
334 | 406 | ||
335 | for (i = 0; i < device->comps_used; i++) { | 407 | ib_dereg_mr(device->mr); |
336 | struct isert_comp *comp = &device->comps[i]; | 408 | ib_dealloc_pd(device->pd); |
337 | 409 | isert_free_comps(device); | |
338 | cancel_work_sync(&comp->work); | ||
339 | ib_destroy_cq(comp->cq); | ||
340 | comp->cq = NULL; | ||
341 | } | ||
342 | kfree(device->comps); | ||
343 | } | 410 | } |
344 | 411 | ||
345 | static void | 412 | static void |
346 | isert_device_try_release(struct isert_device *device) | 413 | isert_device_put(struct isert_device *device) |
347 | { | 414 | { |
348 | mutex_lock(&device_list_mutex); | 415 | mutex_lock(&device_list_mutex); |
349 | device->refcount--; | 416 | device->refcount--; |
@@ -357,7 +424,7 @@ isert_device_try_release(struct isert_device *device) | |||
357 | } | 424 | } |
358 | 425 | ||
359 | static struct isert_device * | 426 | static struct isert_device * |
360 | isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) | 427 | isert_device_get(struct rdma_cm_id *cma_id) |
361 | { | 428 | { |
362 | struct isert_device *device; | 429 | struct isert_device *device; |
363 | int ret; | 430 | int ret; |
@@ -404,13 +471,13 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) | |||
404 | struct fast_reg_descriptor *fr_desc, *tmp; | 471 | struct fast_reg_descriptor *fr_desc, *tmp; |
405 | int i = 0; | 472 | int i = 0; |
406 | 473 | ||
407 | if (list_empty(&isert_conn->conn_fr_pool)) | 474 | if (list_empty(&isert_conn->fr_pool)) |
408 | return; | 475 | return; |
409 | 476 | ||
410 | isert_info("Freeing conn %p fastreg pool", isert_conn); | 477 | isert_info("Freeing conn %p fastreg pool", isert_conn); |
411 | 478 | ||
412 | list_for_each_entry_safe(fr_desc, tmp, | 479 | list_for_each_entry_safe(fr_desc, tmp, |
413 | &isert_conn->conn_fr_pool, list) { | 480 | &isert_conn->fr_pool, list) { |
414 | list_del(&fr_desc->list); | 481 | list_del(&fr_desc->list); |
415 | ib_free_fast_reg_page_list(fr_desc->data_frpl); | 482 | ib_free_fast_reg_page_list(fr_desc->data_frpl); |
416 | ib_dereg_mr(fr_desc->data_mr); | 483 | ib_dereg_mr(fr_desc->data_mr); |
@@ -424,9 +491,9 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) | |||
424 | ++i; | 491 | ++i; |
425 | } | 492 | } |
426 | 493 | ||
427 | if (i < isert_conn->conn_fr_pool_size) | 494 | if (i < isert_conn->fr_pool_size) |
428 | isert_warn("Pool still has %d regions registered\n", | 495 | isert_warn("Pool still has %d regions registered\n", |
429 | isert_conn->conn_fr_pool_size - i); | 496 | isert_conn->fr_pool_size - i); |
430 | } | 497 | } |
431 | 498 | ||
432 | static int | 499 | static int |
@@ -526,7 +593,7 @@ static int | |||
526 | isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | 593 | isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) |
527 | { | 594 | { |
528 | struct fast_reg_descriptor *fr_desc; | 595 | struct fast_reg_descriptor *fr_desc; |
529 | struct isert_device *device = isert_conn->conn_device; | 596 | struct isert_device *device = isert_conn->device; |
530 | struct se_session *se_sess = isert_conn->conn->sess->se_sess; | 597 | struct se_session *se_sess = isert_conn->conn->sess->se_sess; |
531 | struct se_node_acl *se_nacl = se_sess->se_node_acl; | 598 | struct se_node_acl *se_nacl = se_sess->se_node_acl; |
532 | int i, ret, tag_num; | 599 | int i, ret, tag_num; |
@@ -537,7 +604,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | |||
537 | tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); | 604 | tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); |
538 | tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; | 605 | tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; |
539 | 606 | ||
540 | isert_conn->conn_fr_pool_size = 0; | 607 | isert_conn->fr_pool_size = 0; |
541 | for (i = 0; i < tag_num; i++) { | 608 | for (i = 0; i < tag_num; i++) { |
542 | fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); | 609 | fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); |
543 | if (!fr_desc) { | 610 | if (!fr_desc) { |
@@ -547,7 +614,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | |||
547 | } | 614 | } |
548 | 615 | ||
549 | ret = isert_create_fr_desc(device->ib_device, | 616 | ret = isert_create_fr_desc(device->ib_device, |
550 | isert_conn->conn_pd, fr_desc); | 617 | device->pd, fr_desc); |
551 | if (ret) { | 618 | if (ret) { |
552 | isert_err("Failed to create fastreg descriptor err=%d\n", | 619 | isert_err("Failed to create fastreg descriptor err=%d\n", |
553 | ret); | 620 | ret); |
@@ -555,12 +622,12 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | |||
555 | goto err; | 622 | goto err; |
556 | } | 623 | } |
557 | 624 | ||
558 | list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); | 625 | list_add_tail(&fr_desc->list, &isert_conn->fr_pool); |
559 | isert_conn->conn_fr_pool_size++; | 626 | isert_conn->fr_pool_size++; |
560 | } | 627 | } |
561 | 628 | ||
562 | isert_dbg("Creating conn %p fastreg pool size=%d", | 629 | isert_dbg("Creating conn %p fastreg pool size=%d", |
563 | isert_conn, isert_conn->conn_fr_pool_size); | 630 | isert_conn, isert_conn->fr_pool_size); |
564 | 631 | ||
565 | return 0; | 632 | return 0; |
566 | 633 | ||
@@ -569,55 +636,50 @@ err: | |||
569 | return ret; | 636 | return ret; |
570 | } | 637 | } |
571 | 638 | ||
572 | static int | 639 | static void |
573 | isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | 640 | isert_init_conn(struct isert_conn *isert_conn) |
574 | { | 641 | { |
575 | struct isert_np *isert_np = cma_id->context; | ||
576 | struct iscsi_np *np = isert_np->np; | ||
577 | struct isert_conn *isert_conn; | ||
578 | struct isert_device *device; | ||
579 | struct ib_device *ib_dev = cma_id->device; | ||
580 | int ret = 0; | ||
581 | |||
582 | spin_lock_bh(&np->np_thread_lock); | ||
583 | if (!np->enabled) { | ||
584 | spin_unlock_bh(&np->np_thread_lock); | ||
585 | isert_dbg("iscsi_np is not enabled, reject connect request\n"); | ||
586 | return rdma_reject(cma_id, NULL, 0); | ||
587 | } | ||
588 | spin_unlock_bh(&np->np_thread_lock); | ||
589 | |||
590 | isert_dbg("cma_id: %p, portal: %p\n", | ||
591 | cma_id, cma_id->context); | ||
592 | |||
593 | isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); | ||
594 | if (!isert_conn) { | ||
595 | isert_err("Unable to allocate isert_conn\n"); | ||
596 | return -ENOMEM; | ||
597 | } | ||
598 | isert_conn->state = ISER_CONN_INIT; | 642 | isert_conn->state = ISER_CONN_INIT; |
599 | INIT_LIST_HEAD(&isert_conn->conn_accept_node); | 643 | INIT_LIST_HEAD(&isert_conn->accept_node); |
600 | init_completion(&isert_conn->conn_login_comp); | 644 | init_completion(&isert_conn->login_comp); |
601 | init_completion(&isert_conn->login_req_comp); | 645 | init_completion(&isert_conn->login_req_comp); |
602 | init_completion(&isert_conn->conn_wait); | 646 | init_completion(&isert_conn->wait); |
603 | kref_init(&isert_conn->conn_kref); | 647 | kref_init(&isert_conn->kref); |
604 | mutex_init(&isert_conn->conn_mutex); | 648 | mutex_init(&isert_conn->mutex); |
605 | spin_lock_init(&isert_conn->conn_lock); | 649 | spin_lock_init(&isert_conn->pool_lock); |
606 | INIT_LIST_HEAD(&isert_conn->conn_fr_pool); | 650 | INIT_LIST_HEAD(&isert_conn->fr_pool); |
651 | } | ||
652 | |||
653 | static void | ||
654 | isert_free_login_buf(struct isert_conn *isert_conn) | ||
655 | { | ||
656 | struct ib_device *ib_dev = isert_conn->device->ib_device; | ||
607 | 657 | ||
608 | isert_conn->conn_cm_id = cma_id; | 658 | ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, |
659 | ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); | ||
660 | ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, | ||
661 | ISCSI_DEF_MAX_RECV_SEG_LEN, | ||
662 | DMA_FROM_DEVICE); | ||
663 | kfree(isert_conn->login_buf); | ||
664 | } | ||
665 | |||
666 | static int | ||
667 | isert_alloc_login_buf(struct isert_conn *isert_conn, | ||
668 | struct ib_device *ib_dev) | ||
669 | { | ||
670 | int ret; | ||
609 | 671 | ||
610 | isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + | 672 | isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + |
611 | ISER_RX_LOGIN_SIZE, GFP_KERNEL); | 673 | ISER_RX_LOGIN_SIZE, GFP_KERNEL); |
612 | if (!isert_conn->login_buf) { | 674 | if (!isert_conn->login_buf) { |
613 | isert_err("Unable to allocate isert_conn->login_buf\n"); | 675 | isert_err("Unable to allocate isert_conn->login_buf\n"); |
614 | ret = -ENOMEM; | 676 | return -ENOMEM; |
615 | goto out; | ||
616 | } | 677 | } |
617 | 678 | ||
618 | isert_conn->login_req_buf = isert_conn->login_buf; | 679 | isert_conn->login_req_buf = isert_conn->login_buf; |
619 | isert_conn->login_rsp_buf = isert_conn->login_buf + | 680 | isert_conn->login_rsp_buf = isert_conn->login_buf + |
620 | ISCSI_DEF_MAX_RECV_SEG_LEN; | 681 | ISCSI_DEF_MAX_RECV_SEG_LEN; |
682 | |||
621 | isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", | 683 | isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", |
622 | isert_conn->login_buf, isert_conn->login_req_buf, | 684 | isert_conn->login_buf, isert_conn->login_req_buf, |
623 | isert_conn->login_rsp_buf); | 685 | isert_conn->login_rsp_buf); |
@@ -628,8 +690,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
628 | 690 | ||
629 | ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); | 691 | ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); |
630 | if (ret) { | 692 | if (ret) { |
631 | isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n", | 693 | isert_err("login_req_dma mapping error: %d\n", ret); |
632 | ret); | ||
633 | isert_conn->login_req_dma = 0; | 694 | isert_conn->login_req_dma = 0; |
634 | goto out_login_buf; | 695 | goto out_login_buf; |
635 | } | 696 | } |
@@ -640,17 +701,58 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
640 | 701 | ||
641 | ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); | 702 | ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); |
642 | if (ret) { | 703 | if (ret) { |
643 | isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", | 704 | isert_err("login_rsp_dma mapping error: %d\n", ret); |
644 | ret); | ||
645 | isert_conn->login_rsp_dma = 0; | 705 | isert_conn->login_rsp_dma = 0; |
646 | goto out_req_dma_map; | 706 | goto out_req_dma_map; |
647 | } | 707 | } |
648 | 708 | ||
649 | device = isert_device_find_by_ib_dev(cma_id); | 709 | return 0; |
710 | |||
711 | out_req_dma_map: | ||
712 | ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, | ||
713 | ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); | ||
714 | out_login_buf: | ||
715 | kfree(isert_conn->login_buf); | ||
716 | return ret; | ||
717 | } | ||
718 | |||
719 | static int | ||
720 | isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | ||
721 | { | ||
722 | struct isert_np *isert_np = cma_id->context; | ||
723 | struct iscsi_np *np = isert_np->np; | ||
724 | struct isert_conn *isert_conn; | ||
725 | struct isert_device *device; | ||
726 | int ret = 0; | ||
727 | |||
728 | spin_lock_bh(&np->np_thread_lock); | ||
729 | if (!np->enabled) { | ||
730 | spin_unlock_bh(&np->np_thread_lock); | ||
731 | isert_dbg("iscsi_np is not enabled, reject connect request\n"); | ||
732 | return rdma_reject(cma_id, NULL, 0); | ||
733 | } | ||
734 | spin_unlock_bh(&np->np_thread_lock); | ||
735 | |||
736 | isert_dbg("cma_id: %p, portal: %p\n", | ||
737 | cma_id, cma_id->context); | ||
738 | |||
739 | isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); | ||
740 | if (!isert_conn) | ||
741 | return -ENOMEM; | ||
742 | |||
743 | isert_init_conn(isert_conn); | ||
744 | isert_conn->cm_id = cma_id; | ||
745 | |||
746 | ret = isert_alloc_login_buf(isert_conn, cma_id->device); | ||
747 | if (ret) | ||
748 | goto out; | ||
749 | |||
750 | device = isert_device_get(cma_id); | ||
650 | if (IS_ERR(device)) { | 751 | if (IS_ERR(device)) { |
651 | ret = PTR_ERR(device); | 752 | ret = PTR_ERR(device); |
652 | goto out_rsp_dma_map; | 753 | goto out_rsp_dma_map; |
653 | } | 754 | } |
755 | isert_conn->device = device; | ||
654 | 756 | ||
655 | /* Set max inflight RDMA READ requests */ | 757 | /* Set max inflight RDMA READ requests */ |
656 | isert_conn->initiator_depth = min_t(u8, | 758 | isert_conn->initiator_depth = min_t(u8, |
@@ -658,24 +760,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
658 | device->dev_attr.max_qp_init_rd_atom); | 760 | device->dev_attr.max_qp_init_rd_atom); |
659 | isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); | 761 | isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); |
660 | 762 | ||
661 | isert_conn->conn_device = device; | ||
662 | isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); | ||
663 | if (IS_ERR(isert_conn->conn_pd)) { | ||
664 | ret = PTR_ERR(isert_conn->conn_pd); | ||
665 | isert_err("ib_alloc_pd failed for conn %p: ret=%d\n", | ||
666 | isert_conn, ret); | ||
667 | goto out_pd; | ||
668 | } | ||
669 | |||
670 | isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd, | ||
671 | IB_ACCESS_LOCAL_WRITE); | ||
672 | if (IS_ERR(isert_conn->conn_mr)) { | ||
673 | ret = PTR_ERR(isert_conn->conn_mr); | ||
674 | isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n", | ||
675 | isert_conn, ret); | ||
676 | goto out_mr; | ||
677 | } | ||
678 | |||
679 | ret = isert_conn_setup_qp(isert_conn, cma_id); | 763 | ret = isert_conn_setup_qp(isert_conn, cma_id); |
680 | if (ret) | 764 | if (ret) |
681 | goto out_conn_dev; | 765 | goto out_conn_dev; |
@@ -689,7 +773,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
689 | goto out_conn_dev; | 773 | goto out_conn_dev; |
690 | 774 | ||
691 | mutex_lock(&isert_np->np_accept_mutex); | 775 | mutex_lock(&isert_np->np_accept_mutex); |
692 | list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); | 776 | list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); |
693 | mutex_unlock(&isert_np->np_accept_mutex); | 777 | mutex_unlock(&isert_np->np_accept_mutex); |
694 | 778 | ||
695 | isert_info("np %p: Allow accept_np to continue\n", np); | 779 | isert_info("np %p: Allow accept_np to continue\n", np); |
@@ -697,19 +781,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
697 | return 0; | 781 | return 0; |
698 | 782 | ||
699 | out_conn_dev: | 783 | out_conn_dev: |
700 | ib_dereg_mr(isert_conn->conn_mr); | 784 | isert_device_put(device); |
701 | out_mr: | ||
702 | ib_dealloc_pd(isert_conn->conn_pd); | ||
703 | out_pd: | ||
704 | isert_device_try_release(device); | ||
705 | out_rsp_dma_map: | 785 | out_rsp_dma_map: |
706 | ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, | 786 | isert_free_login_buf(isert_conn); |
707 | ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); | ||
708 | out_req_dma_map: | ||
709 | ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, | ||
710 | ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); | ||
711 | out_login_buf: | ||
712 | kfree(isert_conn->login_buf); | ||
713 | out: | 787 | out: |
714 | kfree(isert_conn); | 788 | kfree(isert_conn); |
715 | rdma_reject(cma_id, NULL, 0); | 789 | rdma_reject(cma_id, NULL, 0); |
@@ -719,43 +793,32 @@ out: | |||
719 | static void | 793 | static void |
720 | isert_connect_release(struct isert_conn *isert_conn) | 794 | isert_connect_release(struct isert_conn *isert_conn) |
721 | { | 795 | { |
722 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 796 | struct isert_device *device = isert_conn->device; |
723 | struct isert_device *device = isert_conn->conn_device; | ||
724 | 797 | ||
725 | isert_dbg("conn %p\n", isert_conn); | 798 | isert_dbg("conn %p\n", isert_conn); |
726 | 799 | ||
727 | if (device && device->use_fastreg) | 800 | BUG_ON(!device); |
801 | |||
802 | if (device->use_fastreg) | ||
728 | isert_conn_free_fastreg_pool(isert_conn); | 803 | isert_conn_free_fastreg_pool(isert_conn); |
729 | 804 | ||
730 | isert_free_rx_descriptors(isert_conn); | 805 | isert_free_rx_descriptors(isert_conn); |
731 | rdma_destroy_id(isert_conn->conn_cm_id); | 806 | if (isert_conn->cm_id) |
807 | rdma_destroy_id(isert_conn->cm_id); | ||
732 | 808 | ||
733 | if (isert_conn->conn_qp) { | 809 | if (isert_conn->qp) { |
734 | struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context; | 810 | struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; |
735 | 811 | ||
736 | isert_dbg("dec completion context %p active_qps\n", comp); | 812 | isert_comp_put(comp); |
737 | mutex_lock(&device_list_mutex); | 813 | ib_destroy_qp(isert_conn->qp); |
738 | comp->active_qps--; | ||
739 | mutex_unlock(&device_list_mutex); | ||
740 | |||
741 | ib_destroy_qp(isert_conn->conn_qp); | ||
742 | } | 814 | } |
743 | 815 | ||
744 | ib_dereg_mr(isert_conn->conn_mr); | 816 | if (isert_conn->login_buf) |
745 | ib_dealloc_pd(isert_conn->conn_pd); | 817 | isert_free_login_buf(isert_conn); |
746 | 818 | ||
747 | if (isert_conn->login_buf) { | 819 | isert_device_put(device); |
748 | ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, | ||
749 | ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); | ||
750 | ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, | ||
751 | ISCSI_DEF_MAX_RECV_SEG_LEN, | ||
752 | DMA_FROM_DEVICE); | ||
753 | kfree(isert_conn->login_buf); | ||
754 | } | ||
755 | kfree(isert_conn); | ||
756 | 820 | ||
757 | if (device) | 821 | kfree(isert_conn); |
758 | isert_device_try_release(device); | ||
759 | } | 822 | } |
760 | 823 | ||
761 | static void | 824 | static void |
@@ -765,22 +828,22 @@ isert_connected_handler(struct rdma_cm_id *cma_id) | |||
765 | 828 | ||
766 | isert_info("conn %p\n", isert_conn); | 829 | isert_info("conn %p\n", isert_conn); |
767 | 830 | ||
768 | if (!kref_get_unless_zero(&isert_conn->conn_kref)) { | 831 | if (!kref_get_unless_zero(&isert_conn->kref)) { |
769 | isert_warn("conn %p connect_release is running\n", isert_conn); | 832 | isert_warn("conn %p connect_release is running\n", isert_conn); |
770 | return; | 833 | return; |
771 | } | 834 | } |
772 | 835 | ||
773 | mutex_lock(&isert_conn->conn_mutex); | 836 | mutex_lock(&isert_conn->mutex); |
774 | if (isert_conn->state != ISER_CONN_FULL_FEATURE) | 837 | if (isert_conn->state != ISER_CONN_FULL_FEATURE) |
775 | isert_conn->state = ISER_CONN_UP; | 838 | isert_conn->state = ISER_CONN_UP; |
776 | mutex_unlock(&isert_conn->conn_mutex); | 839 | mutex_unlock(&isert_conn->mutex); |
777 | } | 840 | } |
778 | 841 | ||
779 | static void | 842 | static void |
780 | isert_release_conn_kref(struct kref *kref) | 843 | isert_release_kref(struct kref *kref) |
781 | { | 844 | { |
782 | struct isert_conn *isert_conn = container_of(kref, | 845 | struct isert_conn *isert_conn = container_of(kref, |
783 | struct isert_conn, conn_kref); | 846 | struct isert_conn, kref); |
784 | 847 | ||
785 | isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, | 848 | isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, |
786 | current->pid); | 849 | current->pid); |
@@ -791,7 +854,7 @@ isert_release_conn_kref(struct kref *kref) | |||
791 | static void | 854 | static void |
792 | isert_put_conn(struct isert_conn *isert_conn) | 855 | isert_put_conn(struct isert_conn *isert_conn) |
793 | { | 856 | { |
794 | kref_put(&isert_conn->conn_kref, isert_release_conn_kref); | 857 | kref_put(&isert_conn->kref, isert_release_kref); |
795 | } | 858 | } |
796 | 859 | ||
797 | /** | 860 | /** |
@@ -803,7 +866,7 @@ isert_put_conn(struct isert_conn *isert_conn) | |||
803 | * to TEMINATING and start teardown sequence (rdma_disconnect). | 866 | * to TEMINATING and start teardown sequence (rdma_disconnect). |
804 | * In case the connection state is UP, complete flush as well. | 867 | * In case the connection state is UP, complete flush as well. |
805 | * | 868 | * |
806 | * This routine must be called with conn_mutex held. Thus it is | 869 | * This routine must be called with mutex held. Thus it is |
807 | * safe to call multiple times. | 870 | * safe to call multiple times. |
808 | */ | 871 | */ |
809 | static void | 872 | static void |
@@ -819,7 +882,7 @@ isert_conn_terminate(struct isert_conn *isert_conn) | |||
819 | isert_info("Terminating conn %p state %d\n", | 882 | isert_info("Terminating conn %p state %d\n", |
820 | isert_conn, isert_conn->state); | 883 | isert_conn, isert_conn->state); |
821 | isert_conn->state = ISER_CONN_TERMINATING; | 884 | isert_conn->state = ISER_CONN_TERMINATING; |
822 | err = rdma_disconnect(isert_conn->conn_cm_id); | 885 | err = rdma_disconnect(isert_conn->cm_id); |
823 | if (err) | 886 | if (err) |
824 | isert_warn("Failed rdma_disconnect isert_conn %p\n", | 887 | isert_warn("Failed rdma_disconnect isert_conn %p\n", |
825 | isert_conn); | 888 | isert_conn); |
@@ -868,22 +931,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, | |||
868 | 931 | ||
869 | isert_conn = cma_id->qp->qp_context; | 932 | isert_conn = cma_id->qp->qp_context; |
870 | 933 | ||
871 | mutex_lock(&isert_conn->conn_mutex); | 934 | mutex_lock(&isert_conn->mutex); |
872 | isert_conn_terminate(isert_conn); | 935 | isert_conn_terminate(isert_conn); |
873 | mutex_unlock(&isert_conn->conn_mutex); | 936 | mutex_unlock(&isert_conn->mutex); |
874 | 937 | ||
875 | isert_info("conn %p completing conn_wait\n", isert_conn); | 938 | isert_info("conn %p completing wait\n", isert_conn); |
876 | complete(&isert_conn->conn_wait); | 939 | complete(&isert_conn->wait); |
877 | 940 | ||
878 | return 0; | 941 | return 0; |
879 | } | 942 | } |
880 | 943 | ||
881 | static void | 944 | static int |
882 | isert_connect_error(struct rdma_cm_id *cma_id) | 945 | isert_connect_error(struct rdma_cm_id *cma_id) |
883 | { | 946 | { |
884 | struct isert_conn *isert_conn = cma_id->qp->qp_context; | 947 | struct isert_conn *isert_conn = cma_id->qp->qp_context; |
885 | 948 | ||
949 | isert_conn->cm_id = NULL; | ||
886 | isert_put_conn(isert_conn); | 950 | isert_put_conn(isert_conn); |
951 | |||
952 | return -1; | ||
887 | } | 953 | } |
888 | 954 | ||
889 | static int | 955 | static int |
@@ -912,7 +978,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
912 | case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ | 978 | case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ |
913 | case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ | 979 | case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ |
914 | case RDMA_CM_EVENT_CONNECT_ERROR: | 980 | case RDMA_CM_EVENT_CONNECT_ERROR: |
915 | isert_connect_error(cma_id); | 981 | ret = isert_connect_error(cma_id); |
916 | break; | 982 | break; |
917 | default: | 983 | default: |
918 | isert_err("Unhandled RDMA CMA event: %d\n", event->event); | 984 | isert_err("Unhandled RDMA CMA event: %d\n", event->event); |
@@ -927,11 +993,11 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count) | |||
927 | { | 993 | { |
928 | struct ib_recv_wr *rx_wr, *rx_wr_failed; | 994 | struct ib_recv_wr *rx_wr, *rx_wr_failed; |
929 | int i, ret; | 995 | int i, ret; |
930 | unsigned int rx_head = isert_conn->conn_rx_desc_head; | 996 | unsigned int rx_head = isert_conn->rx_desc_head; |
931 | struct iser_rx_desc *rx_desc; | 997 | struct iser_rx_desc *rx_desc; |
932 | 998 | ||
933 | for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) { | 999 | for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { |
934 | rx_desc = &isert_conn->conn_rx_descs[rx_head]; | 1000 | rx_desc = &isert_conn->rx_descs[rx_head]; |
935 | rx_wr->wr_id = (uintptr_t)rx_desc; | 1001 | rx_wr->wr_id = (uintptr_t)rx_desc; |
936 | rx_wr->sg_list = &rx_desc->rx_sg; | 1002 | rx_wr->sg_list = &rx_desc->rx_sg; |
937 | rx_wr->num_sge = 1; | 1003 | rx_wr->num_sge = 1; |
@@ -943,14 +1009,14 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count) | |||
943 | rx_wr->next = NULL; /* mark end of work requests list */ | 1009 | rx_wr->next = NULL; /* mark end of work requests list */ |
944 | 1010 | ||
945 | isert_conn->post_recv_buf_count += count; | 1011 | isert_conn->post_recv_buf_count += count; |
946 | ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, | 1012 | ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, |
947 | &rx_wr_failed); | 1013 | &rx_wr_failed); |
948 | if (ret) { | 1014 | if (ret) { |
949 | isert_err("ib_post_recv() failed with ret: %d\n", ret); | 1015 | isert_err("ib_post_recv() failed with ret: %d\n", ret); |
950 | isert_conn->post_recv_buf_count -= count; | 1016 | isert_conn->post_recv_buf_count -= count; |
951 | } else { | 1017 | } else { |
952 | isert_dbg("Posted %d RX buffers\n", count); | 1018 | isert_dbg("Posted %d RX buffers\n", count); |
953 | isert_conn->conn_rx_desc_head = rx_head; | 1019 | isert_conn->rx_desc_head = rx_head; |
954 | } | 1020 | } |
955 | return ret; | 1021 | return ret; |
956 | } | 1022 | } |
@@ -958,7 +1024,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count) | |||
958 | static int | 1024 | static int |
959 | isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) | 1025 | isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) |
960 | { | 1026 | { |
961 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1027 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
962 | struct ib_send_wr send_wr, *send_wr_failed; | 1028 | struct ib_send_wr send_wr, *send_wr_failed; |
963 | int ret; | 1029 | int ret; |
964 | 1030 | ||
@@ -972,7 +1038,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) | |||
972 | send_wr.opcode = IB_WR_SEND; | 1038 | send_wr.opcode = IB_WR_SEND; |
973 | send_wr.send_flags = IB_SEND_SIGNALED; | 1039 | send_wr.send_flags = IB_SEND_SIGNALED; |
974 | 1040 | ||
975 | ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); | 1041 | ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); |
976 | if (ret) | 1042 | if (ret) |
977 | isert_err("ib_post_send() failed, ret: %d\n", ret); | 1043 | isert_err("ib_post_send() failed, ret: %d\n", ret); |
978 | 1044 | ||
@@ -984,7 +1050,8 @@ isert_create_send_desc(struct isert_conn *isert_conn, | |||
984 | struct isert_cmd *isert_cmd, | 1050 | struct isert_cmd *isert_cmd, |
985 | struct iser_tx_desc *tx_desc) | 1051 | struct iser_tx_desc *tx_desc) |
986 | { | 1052 | { |
987 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1053 | struct isert_device *device = isert_conn->device; |
1054 | struct ib_device *ib_dev = device->ib_device; | ||
988 | 1055 | ||
989 | ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, | 1056 | ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, |
990 | ISER_HEADERS_LEN, DMA_TO_DEVICE); | 1057 | ISER_HEADERS_LEN, DMA_TO_DEVICE); |
@@ -995,8 +1062,8 @@ isert_create_send_desc(struct isert_conn *isert_conn, | |||
995 | tx_desc->num_sge = 1; | 1062 | tx_desc->num_sge = 1; |
996 | tx_desc->isert_cmd = isert_cmd; | 1063 | tx_desc->isert_cmd = isert_cmd; |
997 | 1064 | ||
998 | if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { | 1065 | if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { |
999 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; | 1066 | tx_desc->tx_sg[0].lkey = device->mr->lkey; |
1000 | isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); | 1067 | isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); |
1001 | } | 1068 | } |
1002 | } | 1069 | } |
@@ -1005,7 +1072,8 @@ static int | |||
1005 | isert_init_tx_hdrs(struct isert_conn *isert_conn, | 1072 | isert_init_tx_hdrs(struct isert_conn *isert_conn, |
1006 | struct iser_tx_desc *tx_desc) | 1073 | struct iser_tx_desc *tx_desc) |
1007 | { | 1074 | { |
1008 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1075 | struct isert_device *device = isert_conn->device; |
1076 | struct ib_device *ib_dev = device->ib_device; | ||
1009 | u64 dma_addr; | 1077 | u64 dma_addr; |
1010 | 1078 | ||
1011 | dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, | 1079 | dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, |
@@ -1018,7 +1086,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, | |||
1018 | tx_desc->dma_addr = dma_addr; | 1086 | tx_desc->dma_addr = dma_addr; |
1019 | tx_desc->tx_sg[0].addr = tx_desc->dma_addr; | 1087 | tx_desc->tx_sg[0].addr = tx_desc->dma_addr; |
1020 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; | 1088 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; |
1021 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; | 1089 | tx_desc->tx_sg[0].lkey = device->mr->lkey; |
1022 | 1090 | ||
1023 | isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", | 1091 | isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", |
1024 | tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, | 1092 | tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, |
@@ -1051,7 +1119,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) | |||
1051 | memset(&sge, 0, sizeof(struct ib_sge)); | 1119 | memset(&sge, 0, sizeof(struct ib_sge)); |
1052 | sge.addr = isert_conn->login_req_dma; | 1120 | sge.addr = isert_conn->login_req_dma; |
1053 | sge.length = ISER_RX_LOGIN_SIZE; | 1121 | sge.length = ISER_RX_LOGIN_SIZE; |
1054 | sge.lkey = isert_conn->conn_mr->lkey; | 1122 | sge.lkey = isert_conn->device->mr->lkey; |
1055 | 1123 | ||
1056 | isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", | 1124 | isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", |
1057 | sge.addr, sge.length, sge.lkey); | 1125 | sge.addr, sge.length, sge.lkey); |
@@ -1062,7 +1130,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) | |||
1062 | rx_wr.num_sge = 1; | 1130 | rx_wr.num_sge = 1; |
1063 | 1131 | ||
1064 | isert_conn->post_recv_buf_count++; | 1132 | isert_conn->post_recv_buf_count++; |
1065 | ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); | 1133 | ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); |
1066 | if (ret) { | 1134 | if (ret) { |
1067 | isert_err("ib_post_recv() failed: %d\n", ret); | 1135 | isert_err("ib_post_recv() failed: %d\n", ret); |
1068 | isert_conn->post_recv_buf_count--; | 1136 | isert_conn->post_recv_buf_count--; |
@@ -1076,8 +1144,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1076 | u32 length) | 1144 | u32 length) |
1077 | { | 1145 | { |
1078 | struct isert_conn *isert_conn = conn->context; | 1146 | struct isert_conn *isert_conn = conn->context; |
1079 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1147 | struct isert_device *device = isert_conn->device; |
1080 | struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc; | 1148 | struct ib_device *ib_dev = device->ib_device; |
1149 | struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; | ||
1081 | int ret; | 1150 | int ret; |
1082 | 1151 | ||
1083 | isert_create_send_desc(isert_conn, NULL, tx_desc); | 1152 | isert_create_send_desc(isert_conn, NULL, tx_desc); |
@@ -1100,13 +1169,13 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1100 | 1169 | ||
1101 | tx_dsg->addr = isert_conn->login_rsp_dma; | 1170 | tx_dsg->addr = isert_conn->login_rsp_dma; |
1102 | tx_dsg->length = length; | 1171 | tx_dsg->length = length; |
1103 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 1172 | tx_dsg->lkey = isert_conn->device->mr->lkey; |
1104 | tx_desc->num_sge = 2; | 1173 | tx_desc->num_sge = 2; |
1105 | } | 1174 | } |
1106 | if (!login->login_failed) { | 1175 | if (!login->login_failed) { |
1107 | if (login->login_complete) { | 1176 | if (login->login_complete) { |
1108 | if (!conn->sess->sess_ops->SessionType && | 1177 | if (!conn->sess->sess_ops->SessionType && |
1109 | isert_conn->conn_device->use_fastreg) { | 1178 | isert_conn->device->use_fastreg) { |
1110 | ret = isert_conn_create_fastreg_pool(isert_conn); | 1179 | ret = isert_conn_create_fastreg_pool(isert_conn); |
1111 | if (ret) { | 1180 | if (ret) { |
1112 | isert_err("Conn: %p failed to create" | 1181 | isert_err("Conn: %p failed to create" |
@@ -1124,9 +1193,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1124 | return ret; | 1193 | return ret; |
1125 | 1194 | ||
1126 | /* Now we are in FULL_FEATURE phase */ | 1195 | /* Now we are in FULL_FEATURE phase */ |
1127 | mutex_lock(&isert_conn->conn_mutex); | 1196 | mutex_lock(&isert_conn->mutex); |
1128 | isert_conn->state = ISER_CONN_FULL_FEATURE; | 1197 | isert_conn->state = ISER_CONN_FULL_FEATURE; |
1129 | mutex_unlock(&isert_conn->conn_mutex); | 1198 | mutex_unlock(&isert_conn->mutex); |
1130 | goto post_send; | 1199 | goto post_send; |
1131 | } | 1200 | } |
1132 | 1201 | ||
@@ -1185,7 +1254,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) | |||
1185 | memcpy(login->req_buf, &rx_desc->data[0], size); | 1254 | memcpy(login->req_buf, &rx_desc->data[0], size); |
1186 | 1255 | ||
1187 | if (login->first_request) { | 1256 | if (login->first_request) { |
1188 | complete(&isert_conn->conn_login_comp); | 1257 | complete(&isert_conn->login_comp); |
1189 | return; | 1258 | return; |
1190 | } | 1259 | } |
1191 | schedule_delayed_work(&conn->login_work, 0); | 1260 | schedule_delayed_work(&conn->login_work, 0); |
@@ -1194,7 +1263,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) | |||
1194 | static struct iscsi_cmd | 1263 | static struct iscsi_cmd |
1195 | *isert_allocate_cmd(struct iscsi_conn *conn) | 1264 | *isert_allocate_cmd(struct iscsi_conn *conn) |
1196 | { | 1265 | { |
1197 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 1266 | struct isert_conn *isert_conn = conn->context; |
1198 | struct isert_cmd *isert_cmd; | 1267 | struct isert_cmd *isert_cmd; |
1199 | struct iscsi_cmd *cmd; | 1268 | struct iscsi_cmd *cmd; |
1200 | 1269 | ||
@@ -1379,13 +1448,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1379 | { | 1448 | { |
1380 | struct iscsi_hdr *hdr = &rx_desc->iscsi_header; | 1449 | struct iscsi_hdr *hdr = &rx_desc->iscsi_header; |
1381 | struct iscsi_conn *conn = isert_conn->conn; | 1450 | struct iscsi_conn *conn = isert_conn->conn; |
1382 | struct iscsi_session *sess = conn->sess; | ||
1383 | struct iscsi_cmd *cmd; | 1451 | struct iscsi_cmd *cmd; |
1384 | struct isert_cmd *isert_cmd; | 1452 | struct isert_cmd *isert_cmd; |
1385 | int ret = -EINVAL; | 1453 | int ret = -EINVAL; |
1386 | u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); | 1454 | u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); |
1387 | 1455 | ||
1388 | if (sess->sess_ops->SessionType && | 1456 | if (conn->sess->sess_ops->SessionType && |
1389 | (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { | 1457 | (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { |
1390 | isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," | 1458 | isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," |
1391 | " ignoring\n", opcode); | 1459 | " ignoring\n", opcode); |
@@ -1497,10 +1565,11 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) | |||
1497 | } | 1565 | } |
1498 | 1566 | ||
1499 | static void | 1567 | static void |
1500 | isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | 1568 | isert_rcv_completion(struct iser_rx_desc *desc, |
1501 | u32 xfer_len) | 1569 | struct isert_conn *isert_conn, |
1570 | u32 xfer_len) | ||
1502 | { | 1571 | { |
1503 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1572 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1504 | struct iscsi_hdr *hdr; | 1573 | struct iscsi_hdr *hdr; |
1505 | u64 rx_dma; | 1574 | u64 rx_dma; |
1506 | int rx_buflen, outstanding; | 1575 | int rx_buflen, outstanding; |
@@ -1532,9 +1601,9 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | |||
1532 | if (login && !login->first_request) | 1601 | if (login && !login->first_request) |
1533 | isert_rx_login_req(isert_conn); | 1602 | isert_rx_login_req(isert_conn); |
1534 | } | 1603 | } |
1535 | mutex_lock(&isert_conn->conn_mutex); | 1604 | mutex_lock(&isert_conn->mutex); |
1536 | complete(&isert_conn->login_req_comp); | 1605 | complete(&isert_conn->login_req_comp); |
1537 | mutex_unlock(&isert_conn->conn_mutex); | 1606 | mutex_unlock(&isert_conn->mutex); |
1538 | } else { | 1607 | } else { |
1539 | isert_rx_do_work(desc, isert_conn); | 1608 | isert_rx_do_work(desc, isert_conn); |
1540 | } | 1609 | } |
@@ -1566,7 +1635,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
1566 | struct scatterlist *sg, u32 nents, u32 length, u32 offset, | 1635 | struct scatterlist *sg, u32 nents, u32 length, u32 offset, |
1567 | enum iser_ib_op_code op, struct isert_data_buf *data) | 1636 | enum iser_ib_op_code op, struct isert_data_buf *data) |
1568 | { | 1637 | { |
1569 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1638 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1570 | 1639 | ||
1571 | data->dma_dir = op == ISER_IB_RDMA_WRITE ? | 1640 | data->dma_dir = op == ISER_IB_RDMA_WRITE ? |
1572 | DMA_TO_DEVICE : DMA_FROM_DEVICE; | 1641 | DMA_TO_DEVICE : DMA_FROM_DEVICE; |
@@ -1597,7 +1666,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
1597 | static void | 1666 | static void |
1598 | isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) | 1667 | isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) |
1599 | { | 1668 | { |
1600 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1669 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1601 | 1670 | ||
1602 | ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); | 1671 | ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); |
1603 | memset(data, 0, sizeof(*data)); | 1672 | memset(data, 0, sizeof(*data)); |
@@ -1634,7 +1703,6 @@ static void | |||
1634 | isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | 1703 | isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) |
1635 | { | 1704 | { |
1636 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 1705 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
1637 | LIST_HEAD(unmap_list); | ||
1638 | 1706 | ||
1639 | isert_dbg("Cmd %p\n", isert_cmd); | 1707 | isert_dbg("Cmd %p\n", isert_cmd); |
1640 | 1708 | ||
@@ -1644,9 +1712,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1644 | isert_unmap_data_buf(isert_conn, &wr->prot); | 1712 | isert_unmap_data_buf(isert_conn, &wr->prot); |
1645 | wr->fr_desc->ind &= ~ISERT_PROTECTED; | 1713 | wr->fr_desc->ind &= ~ISERT_PROTECTED; |
1646 | } | 1714 | } |
1647 | spin_lock_bh(&isert_conn->conn_lock); | 1715 | spin_lock_bh(&isert_conn->pool_lock); |
1648 | list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); | 1716 | list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool); |
1649 | spin_unlock_bh(&isert_conn->conn_lock); | 1717 | spin_unlock_bh(&isert_conn->pool_lock); |
1650 | wr->fr_desc = NULL; | 1718 | wr->fr_desc = NULL; |
1651 | } | 1719 | } |
1652 | 1720 | ||
@@ -1665,7 +1733,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) | |||
1665 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1733 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1666 | struct isert_conn *isert_conn = isert_cmd->conn; | 1734 | struct isert_conn *isert_conn = isert_cmd->conn; |
1667 | struct iscsi_conn *conn = isert_conn->conn; | 1735 | struct iscsi_conn *conn = isert_conn->conn; |
1668 | struct isert_device *device = isert_conn->conn_device; | 1736 | struct isert_device *device = isert_conn->device; |
1669 | struct iscsi_text_rsp *hdr; | 1737 | struct iscsi_text_rsp *hdr; |
1670 | 1738 | ||
1671 | isert_dbg("Cmd %p\n", isert_cmd); | 1739 | isert_dbg("Cmd %p\n", isert_cmd); |
@@ -1815,7 +1883,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc, | |||
1815 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1883 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1816 | struct se_cmd *se_cmd = &cmd->se_cmd; | 1884 | struct se_cmd *se_cmd = &cmd->se_cmd; |
1817 | struct isert_conn *isert_conn = isert_cmd->conn; | 1885 | struct isert_conn *isert_conn = isert_cmd->conn; |
1818 | struct isert_device *device = isert_conn->conn_device; | 1886 | struct isert_device *device = isert_conn->device; |
1819 | int ret = 0; | 1887 | int ret = 0; |
1820 | 1888 | ||
1821 | if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { | 1889 | if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { |
@@ -1841,7 +1909,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1841 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1909 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1842 | struct se_cmd *se_cmd = &cmd->se_cmd; | 1910 | struct se_cmd *se_cmd = &cmd->se_cmd; |
1843 | struct isert_conn *isert_conn = isert_cmd->conn; | 1911 | struct isert_conn *isert_conn = isert_cmd->conn; |
1844 | struct isert_device *device = isert_conn->conn_device; | 1912 | struct isert_device *device = isert_conn->device; |
1845 | int ret = 0; | 1913 | int ret = 0; |
1846 | 1914 | ||
1847 | if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { | 1915 | if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { |
@@ -1861,11 +1929,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1861 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; | 1929 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; |
1862 | spin_unlock_bh(&cmd->istate_lock); | 1930 | spin_unlock_bh(&cmd->istate_lock); |
1863 | 1931 | ||
1864 | if (ret) | 1932 | if (ret) { |
1933 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | ||
1865 | transport_send_check_condition_and_sense(se_cmd, | 1934 | transport_send_check_condition_and_sense(se_cmd, |
1866 | se_cmd->pi_err, 0); | 1935 | se_cmd->pi_err, 0); |
1867 | else | 1936 | } else { |
1868 | target_execute_cmd(se_cmd); | 1937 | target_execute_cmd(se_cmd); |
1938 | } | ||
1869 | } | 1939 | } |
1870 | 1940 | ||
1871 | static void | 1941 | static void |
@@ -1874,7 +1944,7 @@ isert_do_control_comp(struct work_struct *work) | |||
1874 | struct isert_cmd *isert_cmd = container_of(work, | 1944 | struct isert_cmd *isert_cmd = container_of(work, |
1875 | struct isert_cmd, comp_work); | 1945 | struct isert_cmd, comp_work); |
1876 | struct isert_conn *isert_conn = isert_cmd->conn; | 1946 | struct isert_conn *isert_conn = isert_cmd->conn; |
1877 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1947 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1878 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1948 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1879 | 1949 | ||
1880 | isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); | 1950 | isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); |
@@ -1922,10 +1992,10 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1922 | } | 1992 | } |
1923 | 1993 | ||
1924 | static void | 1994 | static void |
1925 | isert_send_completion(struct iser_tx_desc *tx_desc, | 1995 | isert_snd_completion(struct iser_tx_desc *tx_desc, |
1926 | struct isert_conn *isert_conn) | 1996 | struct isert_conn *isert_conn) |
1927 | { | 1997 | { |
1928 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1998 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1929 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1999 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; |
1930 | struct isert_rdma_wr *wr; | 2000 | struct isert_rdma_wr *wr; |
1931 | 2001 | ||
@@ -1938,10 +2008,6 @@ isert_send_completion(struct iser_tx_desc *tx_desc, | |||
1938 | isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); | 2008 | isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); |
1939 | 2009 | ||
1940 | switch (wr->iser_ib_op) { | 2010 | switch (wr->iser_ib_op) { |
1941 | case ISER_IB_RECV: | ||
1942 | isert_err("Got ISER_IB_RECV\n"); | ||
1943 | dump_stack(); | ||
1944 | break; | ||
1945 | case ISER_IB_SEND: | 2011 | case ISER_IB_SEND: |
1946 | isert_response_completion(tx_desc, isert_cmd, | 2012 | isert_response_completion(tx_desc, isert_cmd, |
1947 | isert_conn, ib_dev); | 2013 | isert_conn, ib_dev); |
@@ -1973,8 +2039,8 @@ isert_send_completion(struct iser_tx_desc *tx_desc, | |||
1973 | static inline bool | 2039 | static inline bool |
1974 | is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) | 2040 | is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) |
1975 | { | 2041 | { |
1976 | void *start = isert_conn->conn_rx_descs; | 2042 | void *start = isert_conn->rx_descs; |
1977 | int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs); | 2043 | int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs); |
1978 | 2044 | ||
1979 | if (wr_id >= start && wr_id < start + len) | 2045 | if (wr_id >= start && wr_id < start + len) |
1980 | return false; | 2046 | return false; |
@@ -1986,11 +2052,11 @@ static void | |||
1986 | isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) | 2052 | isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) |
1987 | { | 2053 | { |
1988 | if (wc->wr_id == ISER_BEACON_WRID) { | 2054 | if (wc->wr_id == ISER_BEACON_WRID) { |
1989 | isert_info("conn %p completing conn_wait_comp_err\n", | 2055 | isert_info("conn %p completing wait_comp_err\n", |
1990 | isert_conn); | 2056 | isert_conn); |
1991 | complete(&isert_conn->conn_wait_comp_err); | 2057 | complete(&isert_conn->wait_comp_err); |
1992 | } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { | 2058 | } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { |
1993 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 2059 | struct ib_device *ib_dev = isert_conn->cm_id->device; |
1994 | struct isert_cmd *isert_cmd; | 2060 | struct isert_cmd *isert_cmd; |
1995 | struct iser_tx_desc *desc; | 2061 | struct iser_tx_desc *desc; |
1996 | 2062 | ||
@@ -2018,10 +2084,10 @@ isert_handle_wc(struct ib_wc *wc) | |||
2018 | if (likely(wc->status == IB_WC_SUCCESS)) { | 2084 | if (likely(wc->status == IB_WC_SUCCESS)) { |
2019 | if (wc->opcode == IB_WC_RECV) { | 2085 | if (wc->opcode == IB_WC_RECV) { |
2020 | rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; | 2086 | rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; |
2021 | isert_rx_completion(rx_desc, isert_conn, wc->byte_len); | 2087 | isert_rcv_completion(rx_desc, isert_conn, wc->byte_len); |
2022 | } else { | 2088 | } else { |
2023 | tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; | 2089 | tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; |
2024 | isert_send_completion(tx_desc, isert_conn); | 2090 | isert_snd_completion(tx_desc, isert_conn); |
2025 | } | 2091 | } |
2026 | } else { | 2092 | } else { |
2027 | if (wc->status != IB_WC_WR_FLUSH_ERR) | 2093 | if (wc->status != IB_WC_WR_FLUSH_ERR) |
@@ -2070,7 +2136,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) | |||
2070 | struct ib_send_wr *wr_failed; | 2136 | struct ib_send_wr *wr_failed; |
2071 | int ret; | 2137 | int ret; |
2072 | 2138 | ||
2073 | ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, | 2139 | ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, |
2074 | &wr_failed); | 2140 | &wr_failed); |
2075 | if (ret) { | 2141 | if (ret) { |
2076 | isert_err("ib_post_send failed with %d\n", ret); | 2142 | isert_err("ib_post_send failed with %d\n", ret); |
@@ -2083,7 +2149,7 @@ static int | |||
2083 | isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | 2149 | isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) |
2084 | { | 2150 | { |
2085 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2151 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2086 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2152 | struct isert_conn *isert_conn = conn->context; |
2087 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; | 2153 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; |
2088 | struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) | 2154 | struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) |
2089 | &isert_cmd->tx_desc.iscsi_header; | 2155 | &isert_cmd->tx_desc.iscsi_header; |
@@ -2097,7 +2163,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2097 | if (cmd->se_cmd.sense_buffer && | 2163 | if (cmd->se_cmd.sense_buffer && |
2098 | ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || | 2164 | ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || |
2099 | (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { | 2165 | (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { |
2100 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 2166 | struct isert_device *device = isert_conn->device; |
2167 | struct ib_device *ib_dev = device->ib_device; | ||
2101 | struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; | 2168 | struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; |
2102 | u32 padding, pdu_len; | 2169 | u32 padding, pdu_len; |
2103 | 2170 | ||
@@ -2116,7 +2183,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2116 | isert_cmd->pdu_buf_len = pdu_len; | 2183 | isert_cmd->pdu_buf_len = pdu_len; |
2117 | tx_dsg->addr = isert_cmd->pdu_buf_dma; | 2184 | tx_dsg->addr = isert_cmd->pdu_buf_dma; |
2118 | tx_dsg->length = pdu_len; | 2185 | tx_dsg->length = pdu_len; |
2119 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2186 | tx_dsg->lkey = device->mr->lkey; |
2120 | isert_cmd->tx_desc.num_sge = 2; | 2187 | isert_cmd->tx_desc.num_sge = 2; |
2121 | } | 2188 | } |
2122 | 2189 | ||
@@ -2131,8 +2198,8 @@ static void | |||
2131 | isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | 2198 | isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) |
2132 | { | 2199 | { |
2133 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2200 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2134 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2201 | struct isert_conn *isert_conn = conn->context; |
2135 | struct isert_device *device = isert_conn->conn_device; | 2202 | struct isert_device *device = isert_conn->device; |
2136 | 2203 | ||
2137 | spin_lock_bh(&conn->cmd_lock); | 2204 | spin_lock_bh(&conn->cmd_lock); |
2138 | if (!list_empty(&cmd->i_conn_node)) | 2205 | if (!list_empty(&cmd->i_conn_node)) |
@@ -2148,8 +2215,8 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2148 | static enum target_prot_op | 2215 | static enum target_prot_op |
2149 | isert_get_sup_prot_ops(struct iscsi_conn *conn) | 2216 | isert_get_sup_prot_ops(struct iscsi_conn *conn) |
2150 | { | 2217 | { |
2151 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2218 | struct isert_conn *isert_conn = conn->context; |
2152 | struct isert_device *device = isert_conn->conn_device; | 2219 | struct isert_device *device = isert_conn->device; |
2153 | 2220 | ||
2154 | if (conn->tpg->tpg_attrib.t10_pi) { | 2221 | if (conn->tpg->tpg_attrib.t10_pi) { |
2155 | if (device->pi_capable) { | 2222 | if (device->pi_capable) { |
@@ -2170,7 +2237,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | |||
2170 | bool nopout_response) | 2237 | bool nopout_response) |
2171 | { | 2238 | { |
2172 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2239 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2173 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2240 | struct isert_conn *isert_conn = conn->context; |
2174 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; | 2241 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; |
2175 | 2242 | ||
2176 | isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); | 2243 | isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); |
@@ -2189,7 +2256,7 @@ static int | |||
2189 | isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2256 | isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
2190 | { | 2257 | { |
2191 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2258 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2192 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2259 | struct isert_conn *isert_conn = conn->context; |
2193 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; | 2260 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; |
2194 | 2261 | ||
2195 | isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); | 2262 | isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); |
@@ -2207,7 +2274,7 @@ static int | |||
2207 | isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2274 | isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
2208 | { | 2275 | { |
2209 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2276 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2210 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2277 | struct isert_conn *isert_conn = conn->context; |
2211 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; | 2278 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; |
2212 | 2279 | ||
2213 | isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); | 2280 | isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); |
@@ -2225,9 +2292,10 @@ static int | |||
2225 | isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2292 | isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
2226 | { | 2293 | { |
2227 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2294 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2228 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2295 | struct isert_conn *isert_conn = conn->context; |
2229 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; | 2296 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; |
2230 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 2297 | struct isert_device *device = isert_conn->device; |
2298 | struct ib_device *ib_dev = device->ib_device; | ||
2231 | struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; | 2299 | struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; |
2232 | struct iscsi_reject *hdr = | 2300 | struct iscsi_reject *hdr = |
2233 | (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; | 2301 | (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; |
@@ -2243,7 +2311,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2243 | isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; | 2311 | isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; |
2244 | tx_dsg->addr = isert_cmd->pdu_buf_dma; | 2312 | tx_dsg->addr = isert_cmd->pdu_buf_dma; |
2245 | tx_dsg->length = ISCSI_HDR_LEN; | 2313 | tx_dsg->length = ISCSI_HDR_LEN; |
2246 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2314 | tx_dsg->lkey = device->mr->lkey; |
2247 | isert_cmd->tx_desc.num_sge = 2; | 2315 | isert_cmd->tx_desc.num_sge = 2; |
2248 | 2316 | ||
2249 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); | 2317 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
@@ -2257,7 +2325,7 @@ static int | |||
2257 | isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2325 | isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
2258 | { | 2326 | { |
2259 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2327 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2260 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2328 | struct isert_conn *isert_conn = conn->context; |
2261 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; | 2329 | struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; |
2262 | struct iscsi_text_rsp *hdr = | 2330 | struct iscsi_text_rsp *hdr = |
2263 | (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; | 2331 | (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; |
@@ -2273,7 +2341,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2273 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2341 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2274 | 2342 | ||
2275 | if (txt_rsp_len) { | 2343 | if (txt_rsp_len) { |
2276 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 2344 | struct isert_device *device = isert_conn->device; |
2345 | struct ib_device *ib_dev = device->ib_device; | ||
2277 | struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; | 2346 | struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; |
2278 | void *txt_rsp_buf = cmd->buf_ptr; | 2347 | void *txt_rsp_buf = cmd->buf_ptr; |
2279 | 2348 | ||
@@ -2283,7 +2352,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2283 | isert_cmd->pdu_buf_len = txt_rsp_len; | 2352 | isert_cmd->pdu_buf_len = txt_rsp_len; |
2284 | tx_dsg->addr = isert_cmd->pdu_buf_dma; | 2353 | tx_dsg->addr = isert_cmd->pdu_buf_dma; |
2285 | tx_dsg->length = txt_rsp_len; | 2354 | tx_dsg->length = txt_rsp_len; |
2286 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2355 | tx_dsg->lkey = device->mr->lkey; |
2287 | isert_cmd->tx_desc.num_sge = 2; | 2356 | isert_cmd->tx_desc.num_sge = 2; |
2288 | } | 2357 | } |
2289 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); | 2358 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
@@ -2300,7 +2369,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
2300 | { | 2369 | { |
2301 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 2370 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
2302 | struct scatterlist *sg_start, *tmp_sg; | 2371 | struct scatterlist *sg_start, *tmp_sg; |
2303 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 2372 | struct isert_device *device = isert_conn->device; |
2373 | struct ib_device *ib_dev = device->ib_device; | ||
2304 | u32 sg_off, page_off; | 2374 | u32 sg_off, page_off; |
2305 | int i = 0, sg_nents; | 2375 | int i = 0, sg_nents; |
2306 | 2376 | ||
@@ -2324,7 +2394,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
2324 | ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; | 2394 | ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; |
2325 | ib_sge->length = min_t(u32, data_left, | 2395 | ib_sge->length = min_t(u32, data_left, |
2326 | ib_sg_dma_len(ib_dev, tmp_sg) - page_off); | 2396 | ib_sg_dma_len(ib_dev, tmp_sg) - page_off); |
2327 | ib_sge->lkey = isert_conn->conn_mr->lkey; | 2397 | ib_sge->lkey = device->mr->lkey; |
2328 | 2398 | ||
2329 | isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", | 2399 | isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", |
2330 | ib_sge->addr, ib_sge->length, ib_sge->lkey); | 2400 | ib_sge->addr, ib_sge->length, ib_sge->lkey); |
@@ -2346,7 +2416,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2346 | { | 2416 | { |
2347 | struct se_cmd *se_cmd = &cmd->se_cmd; | 2417 | struct se_cmd *se_cmd = &cmd->se_cmd; |
2348 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2418 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2349 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2419 | struct isert_conn *isert_conn = conn->context; |
2350 | struct isert_data_buf *data = &wr->data; | 2420 | struct isert_data_buf *data = &wr->data; |
2351 | struct ib_send_wr *send_wr; | 2421 | struct ib_send_wr *send_wr; |
2352 | struct ib_sge *ib_sge; | 2422 | struct ib_sge *ib_sge; |
@@ -2485,7 +2555,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2485 | enum isert_indicator ind, | 2555 | enum isert_indicator ind, |
2486 | struct ib_sge *sge) | 2556 | struct ib_sge *sge) |
2487 | { | 2557 | { |
2488 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 2558 | struct isert_device *device = isert_conn->device; |
2559 | struct ib_device *ib_dev = device->ib_device; | ||
2489 | struct ib_mr *mr; | 2560 | struct ib_mr *mr; |
2490 | struct ib_fast_reg_page_list *frpl; | 2561 | struct ib_fast_reg_page_list *frpl; |
2491 | struct ib_send_wr fr_wr, inv_wr; | 2562 | struct ib_send_wr fr_wr, inv_wr; |
@@ -2494,7 +2565,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2494 | u32 page_off; | 2565 | u32 page_off; |
2495 | 2566 | ||
2496 | if (mem->dma_nents == 1) { | 2567 | if (mem->dma_nents == 1) { |
2497 | sge->lkey = isert_conn->conn_mr->lkey; | 2568 | sge->lkey = device->mr->lkey; |
2498 | sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); | 2569 | sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); |
2499 | sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); | 2570 | sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); |
2500 | isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", | 2571 | isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", |
@@ -2542,7 +2613,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2542 | else | 2613 | else |
2543 | wr->next = &fr_wr; | 2614 | wr->next = &fr_wr; |
2544 | 2615 | ||
2545 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); | 2616 | ret = ib_post_send(isert_conn->qp, wr, &bad_wr); |
2546 | if (ret) { | 2617 | if (ret) { |
2547 | isert_err("fast registration failed, ret:%d\n", ret); | 2618 | isert_err("fast registration failed, ret:%d\n", ret); |
2548 | return ret; | 2619 | return ret; |
@@ -2655,7 +2726,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, | |||
2655 | else | 2726 | else |
2656 | wr->next = &sig_wr; | 2727 | wr->next = &sig_wr; |
2657 | 2728 | ||
2658 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); | 2729 | ret = ib_post_send(isert_conn->qp, wr, &bad_wr); |
2659 | if (ret) { | 2730 | if (ret) { |
2660 | isert_err("fast registration failed, ret:%d\n", ret); | 2731 | isert_err("fast registration failed, ret:%d\n", ret); |
2661 | goto err; | 2732 | goto err; |
@@ -2685,14 +2756,14 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn, | |||
2685 | struct isert_cmd *isert_cmd, | 2756 | struct isert_cmd *isert_cmd, |
2686 | struct isert_rdma_wr *wr) | 2757 | struct isert_rdma_wr *wr) |
2687 | { | 2758 | { |
2688 | struct isert_device *device = isert_conn->conn_device; | 2759 | struct isert_device *device = isert_conn->device; |
2689 | struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; | 2760 | struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; |
2690 | int ret; | 2761 | int ret; |
2691 | 2762 | ||
2692 | if (!wr->fr_desc->pi_ctx) { | 2763 | if (!wr->fr_desc->pi_ctx) { |
2693 | ret = isert_create_pi_ctx(wr->fr_desc, | 2764 | ret = isert_create_pi_ctx(wr->fr_desc, |
2694 | device->ib_device, | 2765 | device->ib_device, |
2695 | isert_conn->conn_pd); | 2766 | device->pd); |
2696 | if (ret) { | 2767 | if (ret) { |
2697 | isert_err("conn %p failed to allocate pi_ctx\n", | 2768 | isert_err("conn %p failed to allocate pi_ctx\n", |
2698 | isert_conn); | 2769 | isert_conn); |
@@ -2763,11 +2834,11 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2763 | return ret; | 2834 | return ret; |
2764 | 2835 | ||
2765 | if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { | 2836 | if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { |
2766 | spin_lock_irqsave(&isert_conn->conn_lock, flags); | 2837 | spin_lock_irqsave(&isert_conn->pool_lock, flags); |
2767 | fr_desc = list_first_entry(&isert_conn->conn_fr_pool, | 2838 | fr_desc = list_first_entry(&isert_conn->fr_pool, |
2768 | struct fast_reg_descriptor, list); | 2839 | struct fast_reg_descriptor, list); |
2769 | list_del(&fr_desc->list); | 2840 | list_del(&fr_desc->list); |
2770 | spin_unlock_irqrestore(&isert_conn->conn_lock, flags); | 2841 | spin_unlock_irqrestore(&isert_conn->pool_lock, flags); |
2771 | wr->fr_desc = fr_desc; | 2842 | wr->fr_desc = fr_desc; |
2772 | } | 2843 | } |
2773 | 2844 | ||
@@ -2814,9 +2885,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2814 | 2885 | ||
2815 | unmap_cmd: | 2886 | unmap_cmd: |
2816 | if (fr_desc) { | 2887 | if (fr_desc) { |
2817 | spin_lock_irqsave(&isert_conn->conn_lock, flags); | 2888 | spin_lock_irqsave(&isert_conn->pool_lock, flags); |
2818 | list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); | 2889 | list_add_tail(&fr_desc->list, &isert_conn->fr_pool); |
2819 | spin_unlock_irqrestore(&isert_conn->conn_lock, flags); | 2890 | spin_unlock_irqrestore(&isert_conn->pool_lock, flags); |
2820 | } | 2891 | } |
2821 | isert_unmap_data_buf(isert_conn, &wr->data); | 2892 | isert_unmap_data_buf(isert_conn, &wr->data); |
2822 | 2893 | ||
@@ -2829,8 +2900,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2829 | struct se_cmd *se_cmd = &cmd->se_cmd; | 2900 | struct se_cmd *se_cmd = &cmd->se_cmd; |
2830 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2901 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2831 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 2902 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
2832 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2903 | struct isert_conn *isert_conn = conn->context; |
2833 | struct isert_device *device = isert_conn->conn_device; | 2904 | struct isert_device *device = isert_conn->device; |
2834 | struct ib_send_wr *wr_failed; | 2905 | struct ib_send_wr *wr_failed; |
2835 | int rc; | 2906 | int rc; |
2836 | 2907 | ||
@@ -2859,7 +2930,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2859 | wr->send_wr_num += 1; | 2930 | wr->send_wr_num += 1; |
2860 | } | 2931 | } |
2861 | 2932 | ||
2862 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2933 | rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); |
2863 | if (rc) | 2934 | if (rc) |
2864 | isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); | 2935 | isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); |
2865 | 2936 | ||
@@ -2879,8 +2950,8 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2879 | struct se_cmd *se_cmd = &cmd->se_cmd; | 2950 | struct se_cmd *se_cmd = &cmd->se_cmd; |
2880 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2951 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2881 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 2952 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
2882 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2953 | struct isert_conn *isert_conn = conn->context; |
2883 | struct isert_device *device = isert_conn->conn_device; | 2954 | struct isert_device *device = isert_conn->device; |
2884 | struct ib_send_wr *wr_failed; | 2955 | struct ib_send_wr *wr_failed; |
2885 | int rc; | 2956 | int rc; |
2886 | 2957 | ||
@@ -2893,7 +2964,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2893 | return rc; | 2964 | return rc; |
2894 | } | 2965 | } |
2895 | 2966 | ||
2896 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2967 | rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); |
2897 | if (rc) | 2968 | if (rc) |
2898 | isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); | 2969 | isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); |
2899 | 2970 | ||
@@ -2987,7 +3058,7 @@ isert_setup_id(struct isert_np *isert_np) | |||
2987 | goto out_id; | 3058 | goto out_id; |
2988 | } | 3059 | } |
2989 | 3060 | ||
2990 | ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); | 3061 | ret = rdma_listen(id, 0); |
2991 | if (ret) { | 3062 | if (ret) { |
2992 | isert_err("rdma_listen() failed: %d\n", ret); | 3063 | isert_err("rdma_listen() failed: %d\n", ret); |
2993 | goto out_id; | 3064 | goto out_id; |
@@ -3046,7 +3117,7 @@ out: | |||
3046 | static int | 3117 | static int |
3047 | isert_rdma_accept(struct isert_conn *isert_conn) | 3118 | isert_rdma_accept(struct isert_conn *isert_conn) |
3048 | { | 3119 | { |
3049 | struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; | 3120 | struct rdma_cm_id *cm_id = isert_conn->cm_id; |
3050 | struct rdma_conn_param cp; | 3121 | struct rdma_conn_param cp; |
3051 | int ret; | 3122 | int ret; |
3052 | 3123 | ||
@@ -3067,7 +3138,7 @@ isert_rdma_accept(struct isert_conn *isert_conn) | |||
3067 | static int | 3138 | static int |
3068 | isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | 3139 | isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) |
3069 | { | 3140 | { |
3070 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 3141 | struct isert_conn *isert_conn = conn->context; |
3071 | int ret; | 3142 | int ret; |
3072 | 3143 | ||
3073 | isert_info("before login_req comp conn: %p\n", isert_conn); | 3144 | isert_info("before login_req comp conn: %p\n", isert_conn); |
@@ -3090,8 +3161,8 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | |||
3090 | 3161 | ||
3091 | isert_rx_login_req(isert_conn); | 3162 | isert_rx_login_req(isert_conn); |
3092 | 3163 | ||
3093 | isert_info("before conn_login_comp conn: %p\n", conn); | 3164 | isert_info("before login_comp conn: %p\n", conn); |
3094 | ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); | 3165 | ret = wait_for_completion_interruptible(&isert_conn->login_comp); |
3095 | if (ret) | 3166 | if (ret) |
3096 | return ret; | 3167 | return ret; |
3097 | 3168 | ||
@@ -3104,7 +3175,7 @@ static void | |||
3104 | isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, | 3175 | isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, |
3105 | struct isert_conn *isert_conn) | 3176 | struct isert_conn *isert_conn) |
3106 | { | 3177 | { |
3107 | struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; | 3178 | struct rdma_cm_id *cm_id = isert_conn->cm_id; |
3108 | struct rdma_route *cm_route = &cm_id->route; | 3179 | struct rdma_route *cm_route = &cm_id->route; |
3109 | struct sockaddr_in *sock_in; | 3180 | struct sockaddr_in *sock_in; |
3110 | struct sockaddr_in6 *sock_in6; | 3181 | struct sockaddr_in6 *sock_in6; |
@@ -3137,13 +3208,13 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, | |||
3137 | static int | 3208 | static int |
3138 | isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) | 3209 | isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) |
3139 | { | 3210 | { |
3140 | struct isert_np *isert_np = (struct isert_np *)np->np_context; | 3211 | struct isert_np *isert_np = np->np_context; |
3141 | struct isert_conn *isert_conn; | 3212 | struct isert_conn *isert_conn; |
3142 | int max_accept = 0, ret; | 3213 | int ret; |
3143 | 3214 | ||
3144 | accept_wait: | 3215 | accept_wait: |
3145 | ret = down_interruptible(&isert_np->np_sem); | 3216 | ret = down_interruptible(&isert_np->np_sem); |
3146 | if (ret || max_accept > 5) | 3217 | if (ret) |
3147 | return -ENODEV; | 3218 | return -ENODEV; |
3148 | 3219 | ||
3149 | spin_lock_bh(&np->np_thread_lock); | 3220 | spin_lock_bh(&np->np_thread_lock); |
@@ -3162,17 +3233,15 @@ accept_wait: | |||
3162 | mutex_lock(&isert_np->np_accept_mutex); | 3233 | mutex_lock(&isert_np->np_accept_mutex); |
3163 | if (list_empty(&isert_np->np_accept_list)) { | 3234 | if (list_empty(&isert_np->np_accept_list)) { |
3164 | mutex_unlock(&isert_np->np_accept_mutex); | 3235 | mutex_unlock(&isert_np->np_accept_mutex); |
3165 | max_accept++; | ||
3166 | goto accept_wait; | 3236 | goto accept_wait; |
3167 | } | 3237 | } |
3168 | isert_conn = list_first_entry(&isert_np->np_accept_list, | 3238 | isert_conn = list_first_entry(&isert_np->np_accept_list, |
3169 | struct isert_conn, conn_accept_node); | 3239 | struct isert_conn, accept_node); |
3170 | list_del_init(&isert_conn->conn_accept_node); | 3240 | list_del_init(&isert_conn->accept_node); |
3171 | mutex_unlock(&isert_np->np_accept_mutex); | 3241 | mutex_unlock(&isert_np->np_accept_mutex); |
3172 | 3242 | ||
3173 | conn->context = isert_conn; | 3243 | conn->context = isert_conn; |
3174 | isert_conn->conn = conn; | 3244 | isert_conn->conn = conn; |
3175 | max_accept = 0; | ||
3176 | 3245 | ||
3177 | isert_set_conn_info(np, conn, isert_conn); | 3246 | isert_set_conn_info(np, conn, isert_conn); |
3178 | 3247 | ||
@@ -3184,7 +3253,7 @@ accept_wait: | |||
3184 | static void | 3253 | static void |
3185 | isert_free_np(struct iscsi_np *np) | 3254 | isert_free_np(struct iscsi_np *np) |
3186 | { | 3255 | { |
3187 | struct isert_np *isert_np = (struct isert_np *)np->np_context; | 3256 | struct isert_np *isert_np = np->np_context; |
3188 | struct isert_conn *isert_conn, *n; | 3257 | struct isert_conn *isert_conn, *n; |
3189 | 3258 | ||
3190 | if (isert_np->np_cm_id) | 3259 | if (isert_np->np_cm_id) |
@@ -3202,7 +3271,7 @@ isert_free_np(struct iscsi_np *np) | |||
3202 | isert_info("Still have isert connections, cleaning up...\n"); | 3271 | isert_info("Still have isert connections, cleaning up...\n"); |
3203 | list_for_each_entry_safe(isert_conn, n, | 3272 | list_for_each_entry_safe(isert_conn, n, |
3204 | &isert_np->np_accept_list, | 3273 | &isert_np->np_accept_list, |
3205 | conn_accept_node) { | 3274 | accept_node) { |
3206 | isert_info("cleaning isert_conn %p state (%d)\n", | 3275 | isert_info("cleaning isert_conn %p state (%d)\n", |
3207 | isert_conn, isert_conn->state); | 3276 | isert_conn, isert_conn->state); |
3208 | isert_connect_release(isert_conn); | 3277 | isert_connect_release(isert_conn); |
@@ -3222,11 +3291,11 @@ static void isert_release_work(struct work_struct *work) | |||
3222 | 3291 | ||
3223 | isert_info("Starting release conn %p\n", isert_conn); | 3292 | isert_info("Starting release conn %p\n", isert_conn); |
3224 | 3293 | ||
3225 | wait_for_completion(&isert_conn->conn_wait); | 3294 | wait_for_completion(&isert_conn->wait); |
3226 | 3295 | ||
3227 | mutex_lock(&isert_conn->conn_mutex); | 3296 | mutex_lock(&isert_conn->mutex); |
3228 | isert_conn->state = ISER_CONN_DOWN; | 3297 | isert_conn->state = ISER_CONN_DOWN; |
3229 | mutex_unlock(&isert_conn->conn_mutex); | 3298 | mutex_unlock(&isert_conn->mutex); |
3230 | 3299 | ||
3231 | isert_info("Destroying conn %p\n", isert_conn); | 3300 | isert_info("Destroying conn %p\n", isert_conn); |
3232 | isert_put_conn(isert_conn); | 3301 | isert_put_conn(isert_conn); |
@@ -3264,15 +3333,15 @@ isert_wait4flush(struct isert_conn *isert_conn) | |||
3264 | 3333 | ||
3265 | isert_info("conn %p\n", isert_conn); | 3334 | isert_info("conn %p\n", isert_conn); |
3266 | 3335 | ||
3267 | init_completion(&isert_conn->conn_wait_comp_err); | 3336 | init_completion(&isert_conn->wait_comp_err); |
3268 | isert_conn->beacon.wr_id = ISER_BEACON_WRID; | 3337 | isert_conn->beacon.wr_id = ISER_BEACON_WRID; |
3269 | /* post an indication that all flush errors were consumed */ | 3338 | /* post an indication that all flush errors were consumed */ |
3270 | if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) { | 3339 | if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { |
3271 | isert_err("conn %p failed to post beacon", isert_conn); | 3340 | isert_err("conn %p failed to post beacon", isert_conn); |
3272 | return; | 3341 | return; |
3273 | } | 3342 | } |
3274 | 3343 | ||
3275 | wait_for_completion(&isert_conn->conn_wait_comp_err); | 3344 | wait_for_completion(&isert_conn->wait_comp_err); |
3276 | } | 3345 | } |
3277 | 3346 | ||
3278 | static void isert_wait_conn(struct iscsi_conn *conn) | 3347 | static void isert_wait_conn(struct iscsi_conn *conn) |
@@ -3281,17 +3350,17 @@ static void isert_wait_conn(struct iscsi_conn *conn) | |||
3281 | 3350 | ||
3282 | isert_info("Starting conn %p\n", isert_conn); | 3351 | isert_info("Starting conn %p\n", isert_conn); |
3283 | 3352 | ||
3284 | mutex_lock(&isert_conn->conn_mutex); | 3353 | mutex_lock(&isert_conn->mutex); |
3285 | /* | 3354 | /* |
3286 | * Only wait for conn_wait_comp_err if the isert_conn made it | 3355 | * Only wait for wait_comp_err if the isert_conn made it |
3287 | * into full feature phase.. | 3356 | * into full feature phase.. |
3288 | */ | 3357 | */ |
3289 | if (isert_conn->state == ISER_CONN_INIT) { | 3358 | if (isert_conn->state == ISER_CONN_INIT) { |
3290 | mutex_unlock(&isert_conn->conn_mutex); | 3359 | mutex_unlock(&isert_conn->mutex); |
3291 | return; | 3360 | return; |
3292 | } | 3361 | } |
3293 | isert_conn_terminate(isert_conn); | 3362 | isert_conn_terminate(isert_conn); |
3294 | mutex_unlock(&isert_conn->conn_mutex); | 3363 | mutex_unlock(&isert_conn->mutex); |
3295 | 3364 | ||
3296 | isert_wait4cmds(conn); | 3365 | isert_wait4cmds(conn); |
3297 | isert_wait4flush(isert_conn); | 3366 | isert_wait4flush(isert_conn); |
@@ -3370,7 +3439,7 @@ static void __exit isert_exit(void) | |||
3370 | } | 3439 | } |
3371 | 3440 | ||
3372 | MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); | 3441 | MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); |
3373 | MODULE_VERSION("0.1"); | 3442 | MODULE_VERSION("1.0"); |
3374 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | 3443 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); |
3375 | MODULE_LICENSE("GPL"); | 3444 | MODULE_LICENSE("GPL"); |
3376 | 3445 | ||
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 8dc8415d152d..9ec23a786c02 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -31,7 +31,6 @@ | |||
31 | #define isert_err(fmt, arg...) \ | 31 | #define isert_err(fmt, arg...) \ |
32 | pr_err(PFX "%s: " fmt, __func__ , ## arg) | 32 | pr_err(PFX "%s: " fmt, __func__ , ## arg) |
33 | 33 | ||
34 | #define ISERT_RDMA_LISTEN_BACKLOG 10 | ||
35 | #define ISCSI_ISER_SG_TABLESIZE 256 | 34 | #define ISCSI_ISER_SG_TABLESIZE 256 |
36 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL | 35 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL |
37 | #define ISER_BEACON_WRID 0xfffffffffffffffeULL | 36 | #define ISER_BEACON_WRID 0xfffffffffffffffeULL |
@@ -160,27 +159,25 @@ struct isert_conn { | |||
160 | u64 login_req_dma; | 159 | u64 login_req_dma; |
161 | int login_req_len; | 160 | int login_req_len; |
162 | u64 login_rsp_dma; | 161 | u64 login_rsp_dma; |
163 | unsigned int conn_rx_desc_head; | 162 | unsigned int rx_desc_head; |
164 | struct iser_rx_desc *conn_rx_descs; | 163 | struct iser_rx_desc *rx_descs; |
165 | struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX]; | 164 | struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; |
166 | struct iscsi_conn *conn; | 165 | struct iscsi_conn *conn; |
167 | struct list_head conn_accept_node; | 166 | struct list_head accept_node; |
168 | struct completion conn_login_comp; | 167 | struct completion login_comp; |
169 | struct completion login_req_comp; | 168 | struct completion login_req_comp; |
170 | struct iser_tx_desc conn_login_tx_desc; | 169 | struct iser_tx_desc login_tx_desc; |
171 | struct rdma_cm_id *conn_cm_id; | 170 | struct rdma_cm_id *cm_id; |
172 | struct ib_pd *conn_pd; | 171 | struct ib_qp *qp; |
173 | struct ib_mr *conn_mr; | 172 | struct isert_device *device; |
174 | struct ib_qp *conn_qp; | 173 | struct mutex mutex; |
175 | struct isert_device *conn_device; | 174 | struct completion wait; |
176 | struct mutex conn_mutex; | 175 | struct completion wait_comp_err; |
177 | struct completion conn_wait; | 176 | struct kref kref; |
178 | struct completion conn_wait_comp_err; | 177 | struct list_head fr_pool; |
179 | struct kref conn_kref; | 178 | int fr_pool_size; |
180 | struct list_head conn_fr_pool; | ||
181 | int conn_fr_pool_size; | ||
182 | /* lock to protect fastreg pool */ | 179 | /* lock to protect fastreg pool */ |
183 | spinlock_t conn_lock; | 180 | spinlock_t pool_lock; |
184 | struct work_struct release_work; | 181 | struct work_struct release_work; |
185 | struct ib_recv_wr beacon; | 182 | struct ib_recv_wr beacon; |
186 | bool logout_posted; | 183 | bool logout_posted; |
@@ -211,6 +208,8 @@ struct isert_device { | |||
211 | bool pi_capable; | 208 | bool pi_capable; |
212 | int refcount; | 209 | int refcount; |
213 | struct ib_device *ib_device; | 210 | struct ib_device *ib_device; |
211 | struct ib_pd *pd; | ||
212 | struct ib_mr *mr; | ||
214 | struct isert_comp *comps; | 213 | struct isert_comp *comps; |
215 | int comps_used; | 214 | int comps_used; |
216 | struct list_head dev_node; | 215 | struct list_head dev_node; |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 4b9b866e6b0d..9b84b4c0a000 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(srpt_service_guid, | |||
93 | " instead of using the node_guid of the first HCA."); | 93 | " instead of using the node_guid of the first HCA."); |
94 | 94 | ||
95 | static struct ib_client srpt_client; | 95 | static struct ib_client srpt_client; |
96 | static struct target_fabric_configfs *srpt_target; | 96 | static const struct target_core_fabric_ops srpt_template; |
97 | static void srpt_release_channel(struct srpt_rdma_ch *ch); | 97 | static void srpt_release_channel(struct srpt_rdma_ch *ch); |
98 | static int srpt_queue_status(struct se_cmd *cmd); | 98 | static int srpt_queue_status(struct se_cmd *cmd); |
99 | 99 | ||
@@ -3845,7 +3845,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, | |||
3845 | int res; | 3845 | int res; |
3846 | 3846 | ||
3847 | /* Initialize sport->port_wwn and sport->port_tpg_1 */ | 3847 | /* Initialize sport->port_wwn and sport->port_tpg_1 */ |
3848 | res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn, | 3848 | res = core_tpg_register(&srpt_template, &sport->port_wwn, |
3849 | &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); | 3849 | &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); |
3850 | if (res) | 3850 | if (res) |
3851 | return ERR_PTR(res); | 3851 | return ERR_PTR(res); |
@@ -3913,7 +3913,9 @@ static struct configfs_attribute *srpt_wwn_attrs[] = { | |||
3913 | NULL, | 3913 | NULL, |
3914 | }; | 3914 | }; |
3915 | 3915 | ||
3916 | static struct target_core_fabric_ops srpt_template = { | 3916 | static const struct target_core_fabric_ops srpt_template = { |
3917 | .module = THIS_MODULE, | ||
3918 | .name = "srpt", | ||
3917 | .get_fabric_name = srpt_get_fabric_name, | 3919 | .get_fabric_name = srpt_get_fabric_name, |
3918 | .get_fabric_proto_ident = srpt_get_fabric_proto_ident, | 3920 | .get_fabric_proto_ident = srpt_get_fabric_proto_ident, |
3919 | .tpg_get_wwn = srpt_get_fabric_wwn, | 3921 | .tpg_get_wwn = srpt_get_fabric_wwn, |
@@ -3958,6 +3960,10 @@ static struct target_core_fabric_ops srpt_template = { | |||
3958 | .fabric_drop_np = NULL, | 3960 | .fabric_drop_np = NULL, |
3959 | .fabric_make_nodeacl = srpt_make_nodeacl, | 3961 | .fabric_make_nodeacl = srpt_make_nodeacl, |
3960 | .fabric_drop_nodeacl = srpt_drop_nodeacl, | 3962 | .fabric_drop_nodeacl = srpt_drop_nodeacl, |
3963 | |||
3964 | .tfc_wwn_attrs = srpt_wwn_attrs, | ||
3965 | .tfc_tpg_base_attrs = srpt_tpg_attrs, | ||
3966 | .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, | ||
3961 | }; | 3967 | }; |
3962 | 3968 | ||
3963 | /** | 3969 | /** |
@@ -3988,33 +3994,9 @@ static int __init srpt_init_module(void) | |||
3988 | goto out; | 3994 | goto out; |
3989 | } | 3995 | } |
3990 | 3996 | ||
3991 | srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); | 3997 | ret = target_register_template(&srpt_template); |
3992 | if (IS_ERR(srpt_target)) { | 3998 | if (ret) |
3993 | pr_err("couldn't register\n"); | ||
3994 | ret = PTR_ERR(srpt_target); | ||
3995 | goto out; | 3999 | goto out; |
3996 | } | ||
3997 | |||
3998 | srpt_target->tf_ops = srpt_template; | ||
3999 | |||
4000 | /* | ||
4001 | * Set up default attribute lists. | ||
4002 | */ | ||
4003 | srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs; | ||
4004 | srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs; | ||
4005 | srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs; | ||
4006 | srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
4007 | srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
4008 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
4009 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
4010 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
4011 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
4012 | |||
4013 | ret = target_fabric_configfs_register(srpt_target); | ||
4014 | if (ret < 0) { | ||
4015 | pr_err("couldn't register\n"); | ||
4016 | goto out_free_target; | ||
4017 | } | ||
4018 | 4000 | ||
4019 | ret = ib_register_client(&srpt_client); | 4001 | ret = ib_register_client(&srpt_client); |
4020 | if (ret) { | 4002 | if (ret) { |
@@ -4025,11 +4007,7 @@ static int __init srpt_init_module(void) | |||
4025 | return 0; | 4007 | return 0; |
4026 | 4008 | ||
4027 | out_unregister_target: | 4009 | out_unregister_target: |
4028 | target_fabric_configfs_deregister(srpt_target); | 4010 | target_unregister_template(&srpt_template); |
4029 | srpt_target = NULL; | ||
4030 | out_free_target: | ||
4031 | if (srpt_target) | ||
4032 | target_fabric_configfs_free(srpt_target); | ||
4033 | out: | 4011 | out: |
4034 | return ret; | 4012 | return ret; |
4035 | } | 4013 | } |
@@ -4037,8 +4015,7 @@ out: | |||
4037 | static void __exit srpt_cleanup_module(void) | 4015 | static void __exit srpt_cleanup_module(void) |
4038 | { | 4016 | { |
4039 | ib_unregister_client(&srpt_client); | 4017 | ib_unregister_client(&srpt_client); |
4040 | target_fabric_configfs_deregister(srpt_target); | 4018 | target_unregister_template(&srpt_template); |
4041 | srpt_target = NULL; | ||
4042 | } | 4019 | } |
4043 | 4020 | ||
4044 | module_init(srpt_init_module); | 4021 | module_init(srpt_init_module); |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 57418258c101..fe8a8d157e22 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -3065,7 +3065,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
3065 | { | 3065 | { |
3066 | struct qla_hw_data *ha = vha->hw; | 3066 | struct qla_hw_data *ha = vha->hw; |
3067 | struct se_cmd *se_cmd; | 3067 | struct se_cmd *se_cmd; |
3068 | struct target_core_fabric_ops *tfo; | 3068 | const struct target_core_fabric_ops *tfo; |
3069 | struct qla_tgt_cmd *cmd; | 3069 | struct qla_tgt_cmd *cmd; |
3070 | 3070 | ||
3071 | if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { | 3071 | if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index ab4879e12ea7..68c2002e78bf 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -53,9 +53,8 @@ | |||
53 | static struct workqueue_struct *tcm_qla2xxx_free_wq; | 53 | static struct workqueue_struct *tcm_qla2xxx_free_wq; |
54 | static struct workqueue_struct *tcm_qla2xxx_cmd_wq; | 54 | static struct workqueue_struct *tcm_qla2xxx_cmd_wq; |
55 | 55 | ||
56 | /* Local pointer to allocated TCM configfs fabric module */ | 56 | static const struct target_core_fabric_ops tcm_qla2xxx_ops; |
57 | static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; | 57 | static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops; |
58 | static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; | ||
59 | 58 | ||
60 | /* | 59 | /* |
61 | * Parse WWN. | 60 | * Parse WWN. |
@@ -336,6 +335,14 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg | |||
336 | return tpg->tpg_attrib.demo_mode_login_only; | 335 | return tpg->tpg_attrib.demo_mode_login_only; |
337 | } | 336 | } |
338 | 337 | ||
338 | static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) | ||
339 | { | ||
340 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
341 | struct tcm_qla2xxx_tpg, se_tpg); | ||
342 | |||
343 | return tpg->tpg_attrib.fabric_prot_type; | ||
344 | } | ||
345 | |||
339 | static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( | 346 | static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( |
340 | struct se_portal_group *se_tpg) | 347 | struct se_portal_group *se_tpg) |
341 | { | 348 | { |
@@ -1082,8 +1089,53 @@ static ssize_t tcm_qla2xxx_tpg_store_enable( | |||
1082 | 1089 | ||
1083 | TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); | 1090 | TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); |
1084 | 1091 | ||
1092 | static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions( | ||
1093 | struct se_portal_group *se_tpg, | ||
1094 | char *page) | ||
1095 | { | ||
1096 | return target_show_dynamic_sessions(se_tpg, page); | ||
1097 | } | ||
1098 | |||
1099 | TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions); | ||
1100 | |||
1101 | static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type( | ||
1102 | struct se_portal_group *se_tpg, | ||
1103 | const char *page, | ||
1104 | size_t count) | ||
1105 | { | ||
1106 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
1107 | struct tcm_qla2xxx_tpg, se_tpg); | ||
1108 | unsigned long val; | ||
1109 | int ret = kstrtoul(page, 0, &val); | ||
1110 | |||
1111 | if (ret) { | ||
1112 | pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); | ||
1113 | return ret; | ||
1114 | } | ||
1115 | if (val != 0 && val != 1 && val != 3) { | ||
1116 | pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); | ||
1117 | return -EINVAL; | ||
1118 | } | ||
1119 | tpg->tpg_attrib.fabric_prot_type = val; | ||
1120 | |||
1121 | return count; | ||
1122 | } | ||
1123 | |||
1124 | static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type( | ||
1125 | struct se_portal_group *se_tpg, | ||
1126 | char *page) | ||
1127 | { | ||
1128 | struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, | ||
1129 | struct tcm_qla2xxx_tpg, se_tpg); | ||
1130 | |||
1131 | return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); | ||
1132 | } | ||
1133 | TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR); | ||
1134 | |||
1085 | static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { | 1135 | static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { |
1086 | &tcm_qla2xxx_tpg_enable.attr, | 1136 | &tcm_qla2xxx_tpg_enable.attr, |
1137 | &tcm_qla2xxx_tpg_dynamic_sessions.attr, | ||
1138 | &tcm_qla2xxx_tpg_fabric_prot_type.attr, | ||
1087 | NULL, | 1139 | NULL, |
1088 | }; | 1140 | }; |
1089 | 1141 | ||
@@ -1124,7 +1176,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( | |||
1124 | tpg->tpg_attrib.cache_dynamic_acls = 1; | 1176 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
1125 | tpg->tpg_attrib.demo_mode_login_only = 1; | 1177 | tpg->tpg_attrib.demo_mode_login_only = 1; |
1126 | 1178 | ||
1127 | ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, | 1179 | ret = core_tpg_register(&tcm_qla2xxx_ops, wwn, |
1128 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | 1180 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); |
1129 | if (ret < 0) { | 1181 | if (ret < 0) { |
1130 | kfree(tpg); | 1182 | kfree(tpg); |
@@ -1244,7 +1296,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( | |||
1244 | tpg->tpg_attrib.cache_dynamic_acls = 1; | 1296 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
1245 | tpg->tpg_attrib.demo_mode_login_only = 1; | 1297 | tpg->tpg_attrib.demo_mode_login_only = 1; |
1246 | 1298 | ||
1247 | ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, | 1299 | ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn, |
1248 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | 1300 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); |
1249 | if (ret < 0) { | 1301 | if (ret < 0) { |
1250 | kfree(tpg); | 1302 | kfree(tpg); |
@@ -1560,7 +1612,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1560 | 1612 | ||
1561 | se_sess = transport_init_session_tags(num_tags, | 1613 | se_sess = transport_init_session_tags(num_tags, |
1562 | sizeof(struct qla_tgt_cmd), | 1614 | sizeof(struct qla_tgt_cmd), |
1563 | TARGET_PROT_NORMAL); | 1615 | TARGET_PROT_ALL); |
1564 | if (IS_ERR(se_sess)) { | 1616 | if (IS_ERR(se_sess)) { |
1565 | pr_err("Unable to initialize struct se_session\n"); | 1617 | pr_err("Unable to initialize struct se_session\n"); |
1566 | return PTR_ERR(se_sess); | 1618 | return PTR_ERR(se_sess); |
@@ -1934,7 +1986,9 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { | |||
1934 | NULL, | 1986 | NULL, |
1935 | }; | 1987 | }; |
1936 | 1988 | ||
1937 | static struct target_core_fabric_ops tcm_qla2xxx_ops = { | 1989 | static const struct target_core_fabric_ops tcm_qla2xxx_ops = { |
1990 | .module = THIS_MODULE, | ||
1991 | .name = "qla2xxx", | ||
1938 | .get_fabric_name = tcm_qla2xxx_get_fabric_name, | 1992 | .get_fabric_name = tcm_qla2xxx_get_fabric_name, |
1939 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, | 1993 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, |
1940 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, | 1994 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, |
@@ -1949,6 +2003,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
1949 | tcm_qla2xxx_check_demo_write_protect, | 2003 | tcm_qla2xxx_check_demo_write_protect, |
1950 | .tpg_check_prod_mode_write_protect = | 2004 | .tpg_check_prod_mode_write_protect = |
1951 | tcm_qla2xxx_check_prod_write_protect, | 2005 | tcm_qla2xxx_check_prod_write_protect, |
2006 | .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, | ||
1952 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, | 2007 | .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, |
1953 | .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, | 2008 | .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, |
1954 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | 2009 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, |
@@ -1983,9 +2038,15 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
1983 | .fabric_drop_np = NULL, | 2038 | .fabric_drop_np = NULL, |
1984 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, | 2039 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, |
1985 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, | 2040 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, |
2041 | |||
2042 | .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, | ||
2043 | .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, | ||
2044 | .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, | ||
1986 | }; | 2045 | }; |
1987 | 2046 | ||
1988 | static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | 2047 | static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { |
2048 | .module = THIS_MODULE, | ||
2049 | .name = "qla2xxx_npiv", | ||
1989 | .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, | 2050 | .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, |
1990 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, | 2051 | .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, |
1991 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, | 2052 | .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, |
@@ -2033,94 +2094,26 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
2033 | .fabric_drop_np = NULL, | 2094 | .fabric_drop_np = NULL, |
2034 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, | 2095 | .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, |
2035 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, | 2096 | .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, |
2097 | |||
2098 | .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, | ||
2099 | .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, | ||
2036 | }; | 2100 | }; |
2037 | 2101 | ||
2038 | static int tcm_qla2xxx_register_configfs(void) | 2102 | static int tcm_qla2xxx_register_configfs(void) |
2039 | { | 2103 | { |
2040 | struct target_fabric_configfs *fabric, *npiv_fabric; | ||
2041 | int ret; | 2104 | int ret; |
2042 | 2105 | ||
2043 | pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " | 2106 | pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " |
2044 | UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, | 2107 | UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, |
2045 | utsname()->machine); | 2108 | utsname()->machine); |
2046 | /* | 2109 | |
2047 | * Register the top level struct config_item_type with TCM core | 2110 | ret = target_register_template(&tcm_qla2xxx_ops); |
2048 | */ | 2111 | if (ret) |
2049 | fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx"); | ||
2050 | if (IS_ERR(fabric)) { | ||
2051 | pr_err("target_fabric_configfs_init() failed\n"); | ||
2052 | return PTR_ERR(fabric); | ||
2053 | } | ||
2054 | /* | ||
2055 | * Setup fabric->tf_ops from our local tcm_qla2xxx_ops | ||
2056 | */ | ||
2057 | fabric->tf_ops = tcm_qla2xxx_ops; | ||
2058 | /* | ||
2059 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2060 | */ | ||
2061 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; | ||
2062 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; | ||
2063 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = | ||
2064 | tcm_qla2xxx_tpg_attrib_attrs; | ||
2065 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
2066 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
2067 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
2068 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
2069 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
2070 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
2071 | /* | ||
2072 | * Register the fabric for use within TCM | ||
2073 | */ | ||
2074 | ret = target_fabric_configfs_register(fabric); | ||
2075 | if (ret < 0) { | ||
2076 | pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); | ||
2077 | return ret; | 2112 | return ret; |
2078 | } | ||
2079 | /* | ||
2080 | * Setup our local pointer to *fabric | ||
2081 | */ | ||
2082 | tcm_qla2xxx_fabric_configfs = fabric; | ||
2083 | pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n"); | ||
2084 | 2113 | ||
2085 | /* | 2114 | ret = target_register_template(&tcm_qla2xxx_npiv_ops); |
2086 | * Register the top level struct config_item_type for NPIV with TCM core | 2115 | if (ret) |
2087 | */ | ||
2088 | npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv"); | ||
2089 | if (IS_ERR(npiv_fabric)) { | ||
2090 | pr_err("target_fabric_configfs_init() failed\n"); | ||
2091 | ret = PTR_ERR(npiv_fabric); | ||
2092 | goto out_fabric; | ||
2093 | } | ||
2094 | /* | ||
2095 | * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops | ||
2096 | */ | ||
2097 | npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops; | ||
2098 | /* | ||
2099 | * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl | ||
2100 | */ | ||
2101 | npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; | ||
2102 | npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = | ||
2103 | tcm_qla2xxx_npiv_tpg_attrs; | ||
2104 | npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
2105 | npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
2106 | npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
2107 | npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
2108 | npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
2109 | npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
2110 | npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
2111 | /* | ||
2112 | * Register the npiv_fabric for use within TCM | ||
2113 | */ | ||
2114 | ret = target_fabric_configfs_register(npiv_fabric); | ||
2115 | if (ret < 0) { | ||
2116 | pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); | ||
2117 | goto out_fabric; | 2116 | goto out_fabric; |
2118 | } | ||
2119 | /* | ||
2120 | * Setup our local pointer to *npiv_fabric | ||
2121 | */ | ||
2122 | tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric; | ||
2123 | pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n"); | ||
2124 | 2117 | ||
2125 | tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", | 2118 | tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", |
2126 | WQ_MEM_RECLAIM, 0); | 2119 | WQ_MEM_RECLAIM, 0); |
@@ -2140,9 +2133,9 @@ static int tcm_qla2xxx_register_configfs(void) | |||
2140 | out_free_wq: | 2133 | out_free_wq: |
2141 | destroy_workqueue(tcm_qla2xxx_free_wq); | 2134 | destroy_workqueue(tcm_qla2xxx_free_wq); |
2142 | out_fabric_npiv: | 2135 | out_fabric_npiv: |
2143 | target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); | 2136 | target_unregister_template(&tcm_qla2xxx_npiv_ops); |
2144 | out_fabric: | 2137 | out_fabric: |
2145 | target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); | 2138 | target_unregister_template(&tcm_qla2xxx_ops); |
2146 | return ret; | 2139 | return ret; |
2147 | } | 2140 | } |
2148 | 2141 | ||
@@ -2151,13 +2144,8 @@ static void tcm_qla2xxx_deregister_configfs(void) | |||
2151 | destroy_workqueue(tcm_qla2xxx_cmd_wq); | 2144 | destroy_workqueue(tcm_qla2xxx_cmd_wq); |
2152 | destroy_workqueue(tcm_qla2xxx_free_wq); | 2145 | destroy_workqueue(tcm_qla2xxx_free_wq); |
2153 | 2146 | ||
2154 | target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); | 2147 | target_unregister_template(&tcm_qla2xxx_ops); |
2155 | tcm_qla2xxx_fabric_configfs = NULL; | 2148 | target_unregister_template(&tcm_qla2xxx_npiv_ops); |
2156 | pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n"); | ||
2157 | |||
2158 | target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); | ||
2159 | tcm_qla2xxx_npiv_fabric_configfs = NULL; | ||
2160 | pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n"); | ||
2161 | } | 2149 | } |
2162 | 2150 | ||
2163 | static int __init tcm_qla2xxx_init(void) | 2151 | static int __init tcm_qla2xxx_init(void) |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 10c002145648..23295115c9fc 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h | |||
@@ -33,6 +33,7 @@ struct tcm_qla2xxx_tpg_attrib { | |||
33 | int demo_mode_write_protect; | 33 | int demo_mode_write_protect; |
34 | int prod_mode_write_protect; | 34 | int prod_mode_write_protect; |
35 | int demo_mode_login_only; | 35 | int demo_mode_login_only; |
36 | int fabric_prot_type; | ||
36 | }; | 37 | }; |
37 | 38 | ||
38 | struct tcm_qla2xxx_tpg { | 39 | struct tcm_qla2xxx_tpg { |
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 81d44c477a5b..257361280510 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig | |||
@@ -31,12 +31,13 @@ config TCM_PSCSI | |||
31 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered | 31 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered |
32 | passthrough access to Linux/SCSI device | 32 | passthrough access to Linux/SCSI device |
33 | 33 | ||
34 | config TCM_USER | 34 | config TCM_USER2 |
35 | tristate "TCM/USER Subsystem Plugin for Linux" | 35 | tristate "TCM/USER Subsystem Plugin for Linux" |
36 | depends on UIO && NET | 36 | depends on UIO && NET |
37 | help | 37 | help |
38 | Say Y here to enable the TCM/USER subsystem plugin for a userspace | 38 | Say Y here to enable the TCM/USER subsystem plugin for a userspace |
39 | process to handle requests | 39 | process to handle requests. This is version 2 of the ABI; version 1 |
40 | is obsolete. | ||
40 | 41 | ||
41 | source "drivers/target/loopback/Kconfig" | 42 | source "drivers/target/loopback/Kconfig" |
42 | source "drivers/target/tcm_fc/Kconfig" | 43 | source "drivers/target/tcm_fc/Kconfig" |
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index bbb4a7d638ef..e619c0266a79 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile | |||
@@ -22,7 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | |||
22 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o | 22 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o |
23 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o | 23 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o |
24 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o | 24 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o |
25 | obj-$(CONFIG_TCM_USER) += target_core_user.o | 25 | obj-$(CONFIG_TCM_USER2) += target_core_user.o |
26 | 26 | ||
27 | # Fabric modules | 27 | # Fabric modules |
28 | obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ | 28 | obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ |
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile index 13a92403fe3e..0f43be9c3453 100644 --- a/drivers/target/iscsi/Makefile +++ b/drivers/target/iscsi/Makefile | |||
@@ -1,6 +1,5 @@ | |||
1 | iscsi_target_mod-y += iscsi_target_parameters.o \ | 1 | iscsi_target_mod-y += iscsi_target_parameters.o \ |
2 | iscsi_target_seq_pdu_list.o \ | 2 | iscsi_target_seq_pdu_list.o \ |
3 | iscsi_target_tq.o \ | ||
4 | iscsi_target_auth.o \ | 3 | iscsi_target_auth.o \ |
5 | iscsi_target_datain_values.o \ | 4 | iscsi_target_datain_values.o \ |
6 | iscsi_target_device.o \ | 5 | iscsi_target_device.o \ |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 77d64251af40..34871a628b11 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -33,8 +33,6 @@ | |||
33 | #include <target/iscsi/iscsi_target_core.h> | 33 | #include <target/iscsi/iscsi_target_core.h> |
34 | #include "iscsi_target_parameters.h" | 34 | #include "iscsi_target_parameters.h" |
35 | #include "iscsi_target_seq_pdu_list.h" | 35 | #include "iscsi_target_seq_pdu_list.h" |
36 | #include "iscsi_target_tq.h" | ||
37 | #include "iscsi_target_configfs.h" | ||
38 | #include "iscsi_target_datain_values.h" | 36 | #include "iscsi_target_datain_values.h" |
39 | #include "iscsi_target_erl0.h" | 37 | #include "iscsi_target_erl0.h" |
40 | #include "iscsi_target_erl1.h" | 38 | #include "iscsi_target_erl1.h" |
@@ -537,7 +535,7 @@ static struct iscsit_transport iscsi_target_transport = { | |||
537 | 535 | ||
538 | static int __init iscsi_target_init_module(void) | 536 | static int __init iscsi_target_init_module(void) |
539 | { | 537 | { |
540 | int ret = 0; | 538 | int ret = 0, size; |
541 | 539 | ||
542 | pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); | 540 | pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); |
543 | 541 | ||
@@ -546,24 +544,21 @@ static int __init iscsi_target_init_module(void) | |||
546 | pr_err("Unable to allocate memory for iscsit_global\n"); | 544 | pr_err("Unable to allocate memory for iscsit_global\n"); |
547 | return -1; | 545 | return -1; |
548 | } | 546 | } |
547 | spin_lock_init(&iscsit_global->ts_bitmap_lock); | ||
549 | mutex_init(&auth_id_lock); | 548 | mutex_init(&auth_id_lock); |
550 | spin_lock_init(&sess_idr_lock); | 549 | spin_lock_init(&sess_idr_lock); |
551 | idr_init(&tiqn_idr); | 550 | idr_init(&tiqn_idr); |
552 | idr_init(&sess_idr); | 551 | idr_init(&sess_idr); |
553 | 552 | ||
554 | ret = iscsi_target_register_configfs(); | 553 | ret = target_register_template(&iscsi_ops); |
555 | if (ret < 0) | 554 | if (ret) |
556 | goto out; | 555 | goto out; |
557 | 556 | ||
558 | ret = iscsi_thread_set_init(); | 557 | size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long); |
559 | if (ret < 0) | 558 | iscsit_global->ts_bitmap = vzalloc(size); |
559 | if (!iscsit_global->ts_bitmap) { | ||
560 | pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); | ||
560 | goto configfs_out; | 561 | goto configfs_out; |
561 | |||
562 | if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != | ||
563 | TARGET_THREAD_SET_COUNT) { | ||
564 | pr_err("iscsi_allocate_thread_sets() returned" | ||
565 | " unexpected value!\n"); | ||
566 | goto ts_out1; | ||
567 | } | 562 | } |
568 | 563 | ||
569 | lio_qr_cache = kmem_cache_create("lio_qr_cache", | 564 | lio_qr_cache = kmem_cache_create("lio_qr_cache", |
@@ -572,7 +567,7 @@ static int __init iscsi_target_init_module(void) | |||
572 | if (!lio_qr_cache) { | 567 | if (!lio_qr_cache) { |
573 | pr_err("nable to kmem_cache_create() for" | 568 | pr_err("nable to kmem_cache_create() for" |
574 | " lio_qr_cache\n"); | 569 | " lio_qr_cache\n"); |
575 | goto ts_out2; | 570 | goto bitmap_out; |
576 | } | 571 | } |
577 | 572 | ||
578 | lio_dr_cache = kmem_cache_create("lio_dr_cache", | 573 | lio_dr_cache = kmem_cache_create("lio_dr_cache", |
@@ -617,12 +612,13 @@ dr_out: | |||
617 | kmem_cache_destroy(lio_dr_cache); | 612 | kmem_cache_destroy(lio_dr_cache); |
618 | qr_out: | 613 | qr_out: |
619 | kmem_cache_destroy(lio_qr_cache); | 614 | kmem_cache_destroy(lio_qr_cache); |
620 | ts_out2: | 615 | bitmap_out: |
621 | iscsi_deallocate_thread_sets(); | 616 | vfree(iscsit_global->ts_bitmap); |
622 | ts_out1: | ||
623 | iscsi_thread_set_free(); | ||
624 | configfs_out: | 617 | configfs_out: |
625 | iscsi_target_deregister_configfs(); | 618 | /* XXX: this probably wants it to be it's own unwind step.. */ |
619 | if (iscsit_global->discovery_tpg) | ||
620 | iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); | ||
621 | target_unregister_template(&iscsi_ops); | ||
626 | out: | 622 | out: |
627 | kfree(iscsit_global); | 623 | kfree(iscsit_global); |
628 | return -ENOMEM; | 624 | return -ENOMEM; |
@@ -630,8 +626,6 @@ out: | |||
630 | 626 | ||
631 | static void __exit iscsi_target_cleanup_module(void) | 627 | static void __exit iscsi_target_cleanup_module(void) |
632 | { | 628 | { |
633 | iscsi_deallocate_thread_sets(); | ||
634 | iscsi_thread_set_free(); | ||
635 | iscsit_release_discovery_tpg(); | 629 | iscsit_release_discovery_tpg(); |
636 | iscsit_unregister_transport(&iscsi_target_transport); | 630 | iscsit_unregister_transport(&iscsi_target_transport); |
637 | kmem_cache_destroy(lio_qr_cache); | 631 | kmem_cache_destroy(lio_qr_cache); |
@@ -639,8 +633,15 @@ static void __exit iscsi_target_cleanup_module(void) | |||
639 | kmem_cache_destroy(lio_ooo_cache); | 633 | kmem_cache_destroy(lio_ooo_cache); |
640 | kmem_cache_destroy(lio_r2t_cache); | 634 | kmem_cache_destroy(lio_r2t_cache); |
641 | 635 | ||
642 | iscsi_target_deregister_configfs(); | 636 | /* |
637 | * Shutdown discovery sessions and disable discovery TPG | ||
638 | */ | ||
639 | if (iscsit_global->discovery_tpg) | ||
640 | iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); | ||
643 | 641 | ||
642 | target_unregister_template(&iscsi_ops); | ||
643 | |||
644 | vfree(iscsit_global->ts_bitmap); | ||
644 | kfree(iscsit_global); | 645 | kfree(iscsit_global); |
645 | } | 646 | } |
646 | 647 | ||
@@ -990,7 +991,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
990 | /* | 991 | /* |
991 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | 992 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure |
992 | */ | 993 | */ |
993 | transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, | 994 | transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, |
994 | conn->sess->se_sess, be32_to_cpu(hdr->data_length), | 995 | conn->sess->se_sess, be32_to_cpu(hdr->data_length), |
995 | cmd->data_direction, sam_task_attr, | 996 | cmd->data_direction, sam_task_attr, |
996 | cmd->sense_buffer + 2); | 997 | cmd->sense_buffer + 2); |
@@ -1805,8 +1806,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1805 | u8 tcm_function; | 1806 | u8 tcm_function; |
1806 | int ret; | 1807 | int ret; |
1807 | 1808 | ||
1808 | transport_init_se_cmd(&cmd->se_cmd, | 1809 | transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, |
1809 | &lio_target_fabric_configfs->tf_ops, | ||
1810 | conn->sess->se_sess, 0, DMA_NONE, | 1810 | conn->sess->se_sess, 0, DMA_NONE, |
1811 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); | 1811 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); |
1812 | 1812 | ||
@@ -2155,7 +2155,6 @@ reject: | |||
2155 | cmd->text_in_ptr = NULL; | 2155 | cmd->text_in_ptr = NULL; |
2156 | return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); | 2156 | return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); |
2157 | } | 2157 | } |
2158 | EXPORT_SYMBOL(iscsit_handle_text_cmd); | ||
2159 | 2158 | ||
2160 | int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2159 | int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
2161 | { | 2160 | { |
@@ -3715,17 +3714,16 @@ static int iscsit_send_reject( | |||
3715 | 3714 | ||
3716 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) | 3715 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) |
3717 | { | 3716 | { |
3718 | struct iscsi_thread_set *ts = conn->thread_set; | ||
3719 | int ord, cpu; | 3717 | int ord, cpu; |
3720 | /* | 3718 | /* |
3721 | * thread_id is assigned from iscsit_global->ts_bitmap from | 3719 | * bitmap_id is assigned from iscsit_global->ts_bitmap from |
3722 | * within iscsi_thread_set.c:iscsi_allocate_thread_sets() | 3720 | * within iscsit_start_kthreads() |
3723 | * | 3721 | * |
3724 | * Here we use thread_id to determine which CPU that this | 3722 | * Here we use bitmap_id to determine which CPU that this |
3725 | * iSCSI connection's iscsi_thread_set will be scheduled to | 3723 | * iSCSI connection's RX/TX threads will be scheduled to |
3726 | * execute upon. | 3724 | * execute upon. |
3727 | */ | 3725 | */ |
3728 | ord = ts->thread_id % cpumask_weight(cpu_online_mask); | 3726 | ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); |
3729 | for_each_online_cpu(cpu) { | 3727 | for_each_online_cpu(cpu) { |
3730 | if (ord-- == 0) { | 3728 | if (ord-- == 0) { |
3731 | cpumask_set_cpu(cpu, conn->conn_cpumask); | 3729 | cpumask_set_cpu(cpu, conn->conn_cpumask); |
@@ -3914,7 +3912,7 @@ check_rsp_state: | |||
3914 | switch (state) { | 3912 | switch (state) { |
3915 | case ISTATE_SEND_LOGOUTRSP: | 3913 | case ISTATE_SEND_LOGOUTRSP: |
3916 | if (!iscsit_logout_post_handler(cmd, conn)) | 3914 | if (!iscsit_logout_post_handler(cmd, conn)) |
3917 | goto restart; | 3915 | return -ECONNRESET; |
3918 | /* fall through */ | 3916 | /* fall through */ |
3919 | case ISTATE_SEND_STATUS: | 3917 | case ISTATE_SEND_STATUS: |
3920 | case ISTATE_SEND_ASYNCMSG: | 3918 | case ISTATE_SEND_ASYNCMSG: |
@@ -3942,8 +3940,6 @@ check_rsp_state: | |||
3942 | 3940 | ||
3943 | err: | 3941 | err: |
3944 | return -1; | 3942 | return -1; |
3945 | restart: | ||
3946 | return -EAGAIN; | ||
3947 | } | 3943 | } |
3948 | 3944 | ||
3949 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) | 3945 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) |
@@ -3970,21 +3966,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn) | |||
3970 | int iscsi_target_tx_thread(void *arg) | 3966 | int iscsi_target_tx_thread(void *arg) |
3971 | { | 3967 | { |
3972 | int ret = 0; | 3968 | int ret = 0; |
3973 | struct iscsi_conn *conn; | 3969 | struct iscsi_conn *conn = arg; |
3974 | struct iscsi_thread_set *ts = arg; | ||
3975 | /* | 3970 | /* |
3976 | * Allow ourselves to be interrupted by SIGINT so that a | 3971 | * Allow ourselves to be interrupted by SIGINT so that a |
3977 | * connection recovery / failure event can be triggered externally. | 3972 | * connection recovery / failure event can be triggered externally. |
3978 | */ | 3973 | */ |
3979 | allow_signal(SIGINT); | 3974 | allow_signal(SIGINT); |
3980 | 3975 | ||
3981 | restart: | ||
3982 | conn = iscsi_tx_thread_pre_handler(ts); | ||
3983 | if (!conn) | ||
3984 | goto out; | ||
3985 | |||
3986 | ret = 0; | ||
3987 | |||
3988 | while (!kthread_should_stop()) { | 3976 | while (!kthread_should_stop()) { |
3989 | /* | 3977 | /* |
3990 | * Ensure that both TX and RX per connection kthreads | 3978 | * Ensure that both TX and RX per connection kthreads |
@@ -3993,11 +3981,9 @@ restart: | |||
3993 | iscsit_thread_check_cpumask(conn, current, 1); | 3981 | iscsit_thread_check_cpumask(conn, current, 1); |
3994 | 3982 | ||
3995 | wait_event_interruptible(conn->queues_wq, | 3983 | wait_event_interruptible(conn->queues_wq, |
3996 | !iscsit_conn_all_queues_empty(conn) || | 3984 | !iscsit_conn_all_queues_empty(conn)); |
3997 | ts->status == ISCSI_THREAD_SET_RESET); | ||
3998 | 3985 | ||
3999 | if ((ts->status == ISCSI_THREAD_SET_RESET) || | 3986 | if (signal_pending(current)) |
4000 | signal_pending(current)) | ||
4001 | goto transport_err; | 3987 | goto transport_err; |
4002 | 3988 | ||
4003 | get_immediate: | 3989 | get_immediate: |
@@ -4008,15 +3994,14 @@ get_immediate: | |||
4008 | ret = iscsit_handle_response_queue(conn); | 3994 | ret = iscsit_handle_response_queue(conn); |
4009 | if (ret == 1) | 3995 | if (ret == 1) |
4010 | goto get_immediate; | 3996 | goto get_immediate; |
4011 | else if (ret == -EAGAIN) | 3997 | else if (ret == -ECONNRESET) |
4012 | goto restart; | 3998 | goto out; |
4013 | else if (ret < 0) | 3999 | else if (ret < 0) |
4014 | goto transport_err; | 4000 | goto transport_err; |
4015 | } | 4001 | } |
4016 | 4002 | ||
4017 | transport_err: | 4003 | transport_err: |
4018 | iscsit_take_action_for_connection_exit(conn); | 4004 | iscsit_take_action_for_connection_exit(conn); |
4019 | goto restart; | ||
4020 | out: | 4005 | out: |
4021 | return 0; | 4006 | return 0; |
4022 | } | 4007 | } |
@@ -4111,8 +4096,7 @@ int iscsi_target_rx_thread(void *arg) | |||
4111 | int ret; | 4096 | int ret; |
4112 | u8 buffer[ISCSI_HDR_LEN], opcode; | 4097 | u8 buffer[ISCSI_HDR_LEN], opcode; |
4113 | u32 checksum = 0, digest = 0; | 4098 | u32 checksum = 0, digest = 0; |
4114 | struct iscsi_conn *conn = NULL; | 4099 | struct iscsi_conn *conn = arg; |
4115 | struct iscsi_thread_set *ts = arg; | ||
4116 | struct kvec iov; | 4100 | struct kvec iov; |
4117 | /* | 4101 | /* |
4118 | * Allow ourselves to be interrupted by SIGINT so that a | 4102 | * Allow ourselves to be interrupted by SIGINT so that a |
@@ -4120,11 +4104,6 @@ int iscsi_target_rx_thread(void *arg) | |||
4120 | */ | 4104 | */ |
4121 | allow_signal(SIGINT); | 4105 | allow_signal(SIGINT); |
4122 | 4106 | ||
4123 | restart: | ||
4124 | conn = iscsi_rx_thread_pre_handler(ts); | ||
4125 | if (!conn) | ||
4126 | goto out; | ||
4127 | |||
4128 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { | 4107 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { |
4129 | struct completion comp; | 4108 | struct completion comp; |
4130 | int rc; | 4109 | int rc; |
@@ -4134,7 +4113,7 @@ restart: | |||
4134 | if (rc < 0) | 4113 | if (rc < 0) |
4135 | goto transport_err; | 4114 | goto transport_err; |
4136 | 4115 | ||
4137 | goto out; | 4116 | goto transport_err; |
4138 | } | 4117 | } |
4139 | 4118 | ||
4140 | while (!kthread_should_stop()) { | 4119 | while (!kthread_should_stop()) { |
@@ -4210,8 +4189,6 @@ transport_err: | |||
4210 | if (!signal_pending(current)) | 4189 | if (!signal_pending(current)) |
4211 | atomic_set(&conn->transport_failed, 1); | 4190 | atomic_set(&conn->transport_failed, 1); |
4212 | iscsit_take_action_for_connection_exit(conn); | 4191 | iscsit_take_action_for_connection_exit(conn); |
4213 | goto restart; | ||
4214 | out: | ||
4215 | return 0; | 4192 | return 0; |
4216 | } | 4193 | } |
4217 | 4194 | ||
@@ -4273,7 +4250,24 @@ int iscsit_close_connection( | |||
4273 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 4250 | if (conn->conn_transport->transport_type == ISCSI_TCP) |
4274 | complete(&conn->conn_logout_comp); | 4251 | complete(&conn->conn_logout_comp); |
4275 | 4252 | ||
4276 | iscsi_release_thread_set(conn); | 4253 | if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { |
4254 | if (conn->tx_thread && | ||
4255 | cmpxchg(&conn->tx_thread_active, true, false)) { | ||
4256 | send_sig(SIGINT, conn->tx_thread, 1); | ||
4257 | kthread_stop(conn->tx_thread); | ||
4258 | } | ||
4259 | } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { | ||
4260 | if (conn->rx_thread && | ||
4261 | cmpxchg(&conn->rx_thread_active, true, false)) { | ||
4262 | send_sig(SIGINT, conn->rx_thread, 1); | ||
4263 | kthread_stop(conn->rx_thread); | ||
4264 | } | ||
4265 | } | ||
4266 | |||
4267 | spin_lock(&iscsit_global->ts_bitmap_lock); | ||
4268 | bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, | ||
4269 | get_order(1)); | ||
4270 | spin_unlock(&iscsit_global->ts_bitmap_lock); | ||
4277 | 4271 | ||
4278 | iscsit_stop_timers_for_cmds(conn); | 4272 | iscsit_stop_timers_for_cmds(conn); |
4279 | iscsit_stop_nopin_response_timer(conn); | 4273 | iscsit_stop_nopin_response_timer(conn); |
@@ -4383,8 +4377,6 @@ int iscsit_close_connection( | |||
4383 | 4377 | ||
4384 | iscsit_put_transport(conn->conn_transport); | 4378 | iscsit_put_transport(conn->conn_transport); |
4385 | 4379 | ||
4386 | conn->thread_set = NULL; | ||
4387 | |||
4388 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); | 4380 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); |
4389 | conn->conn_state = TARG_CONN_STATE_FREE; | 4381 | conn->conn_state = TARG_CONN_STATE_FREE; |
4390 | kfree(conn); | 4382 | kfree(conn); |
@@ -4551,15 +4543,13 @@ static void iscsit_logout_post_handler_closesession( | |||
4551 | struct iscsi_conn *conn) | 4543 | struct iscsi_conn *conn) |
4552 | { | 4544 | { |
4553 | struct iscsi_session *sess = conn->sess; | 4545 | struct iscsi_session *sess = conn->sess; |
4554 | 4546 | int sleep = cmpxchg(&conn->tx_thread_active, true, false); | |
4555 | iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); | ||
4556 | iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); | ||
4557 | 4547 | ||
4558 | atomic_set(&conn->conn_logout_remove, 0); | 4548 | atomic_set(&conn->conn_logout_remove, 0); |
4559 | complete(&conn->conn_logout_comp); | 4549 | complete(&conn->conn_logout_comp); |
4560 | 4550 | ||
4561 | iscsit_dec_conn_usage_count(conn); | 4551 | iscsit_dec_conn_usage_count(conn); |
4562 | iscsit_stop_session(sess, 1, 1); | 4552 | iscsit_stop_session(sess, sleep, sleep); |
4563 | iscsit_dec_session_usage_count(sess); | 4553 | iscsit_dec_session_usage_count(sess); |
4564 | target_put_session(sess->se_sess); | 4554 | target_put_session(sess->se_sess); |
4565 | } | 4555 | } |
@@ -4567,13 +4557,12 @@ static void iscsit_logout_post_handler_closesession( | |||
4567 | static void iscsit_logout_post_handler_samecid( | 4557 | static void iscsit_logout_post_handler_samecid( |
4568 | struct iscsi_conn *conn) | 4558 | struct iscsi_conn *conn) |
4569 | { | 4559 | { |
4570 | iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); | 4560 | int sleep = cmpxchg(&conn->tx_thread_active, true, false); |
4571 | iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); | ||
4572 | 4561 | ||
4573 | atomic_set(&conn->conn_logout_remove, 0); | 4562 | atomic_set(&conn->conn_logout_remove, 0); |
4574 | complete(&conn->conn_logout_comp); | 4563 | complete(&conn->conn_logout_comp); |
4575 | 4564 | ||
4576 | iscsit_cause_connection_reinstatement(conn, 1); | 4565 | iscsit_cause_connection_reinstatement(conn, sleep); |
4577 | iscsit_dec_conn_usage_count(conn); | 4566 | iscsit_dec_conn_usage_count(conn); |
4578 | } | 4567 | } |
4579 | 4568 | ||
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index e936d56fb523..7d0f9c00d9c2 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h | |||
@@ -35,7 +35,7 @@ extern void iscsit_stop_session(struct iscsi_session *, int, int); | |||
35 | extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); | 35 | extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); |
36 | 36 | ||
37 | extern struct iscsit_global *iscsit_global; | 37 | extern struct iscsit_global *iscsit_global; |
38 | extern struct target_fabric_configfs *lio_target_fabric_configfs; | 38 | extern const struct target_core_fabric_ops iscsi_ops; |
39 | 39 | ||
40 | extern struct kmem_cache *lio_dr_cache; | 40 | extern struct kmem_cache *lio_dr_cache; |
41 | extern struct kmem_cache *lio_ooo_cache; | 41 | extern struct kmem_cache *lio_ooo_cache; |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 48384b675e62..469fce44ebad 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -37,9 +37,6 @@ | |||
37 | #include "iscsi_target_util.h" | 37 | #include "iscsi_target_util.h" |
38 | #include "iscsi_target.h" | 38 | #include "iscsi_target.h" |
39 | #include <target/iscsi/iscsi_target_stat.h> | 39 | #include <target/iscsi/iscsi_target_stat.h> |
40 | #include "iscsi_target_configfs.h" | ||
41 | |||
42 | struct target_fabric_configfs *lio_target_fabric_configfs; | ||
43 | 40 | ||
44 | struct lio_target_configfs_attribute { | 41 | struct lio_target_configfs_attribute { |
45 | struct configfs_attribute attr; | 42 | struct configfs_attribute attr; |
@@ -1052,6 +1049,11 @@ TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); | |||
1052 | */ | 1049 | */ |
1053 | DEF_TPG_ATTRIB(t10_pi); | 1050 | DEF_TPG_ATTRIB(t10_pi); |
1054 | TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); | 1051 | TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); |
1052 | /* | ||
1053 | * Define iscsi_tpg_attrib_s_fabric_prot_type | ||
1054 | */ | ||
1055 | DEF_TPG_ATTRIB(fabric_prot_type); | ||
1056 | TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR); | ||
1055 | 1057 | ||
1056 | static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { | 1058 | static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { |
1057 | &iscsi_tpg_attrib_authentication.attr, | 1059 | &iscsi_tpg_attrib_authentication.attr, |
@@ -1065,6 +1067,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { | |||
1065 | &iscsi_tpg_attrib_demo_mode_discovery.attr, | 1067 | &iscsi_tpg_attrib_demo_mode_discovery.attr, |
1066 | &iscsi_tpg_attrib_default_erl.attr, | 1068 | &iscsi_tpg_attrib_default_erl.attr, |
1067 | &iscsi_tpg_attrib_t10_pi.attr, | 1069 | &iscsi_tpg_attrib_t10_pi.attr, |
1070 | &iscsi_tpg_attrib_fabric_prot_type.attr, | ||
1068 | NULL, | 1071 | NULL, |
1069 | }; | 1072 | }; |
1070 | 1073 | ||
@@ -1410,8 +1413,18 @@ out: | |||
1410 | 1413 | ||
1411 | TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR); | 1414 | TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR); |
1412 | 1415 | ||
1416 | static ssize_t lio_target_tpg_show_dynamic_sessions( | ||
1417 | struct se_portal_group *se_tpg, | ||
1418 | char *page) | ||
1419 | { | ||
1420 | return target_show_dynamic_sessions(se_tpg, page); | ||
1421 | } | ||
1422 | |||
1423 | TF_TPG_BASE_ATTR_RO(lio_target, dynamic_sessions); | ||
1424 | |||
1413 | static struct configfs_attribute *lio_target_tpg_attrs[] = { | 1425 | static struct configfs_attribute *lio_target_tpg_attrs[] = { |
1414 | &lio_target_tpg_enable.attr, | 1426 | &lio_target_tpg_enable.attr, |
1427 | &lio_target_tpg_dynamic_sessions.attr, | ||
1415 | NULL, | 1428 | NULL, |
1416 | }; | 1429 | }; |
1417 | 1430 | ||
@@ -1450,10 +1463,8 @@ static struct se_portal_group *lio_target_tiqn_addtpg( | |||
1450 | if (!tpg) | 1463 | if (!tpg) |
1451 | return NULL; | 1464 | return NULL; |
1452 | 1465 | ||
1453 | ret = core_tpg_register( | 1466 | ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg, |
1454 | &lio_target_fabric_configfs->tf_ops, | 1467 | tpg, TRANSPORT_TPG_TYPE_NORMAL); |
1455 | wwn, &tpg->tpg_se_tpg, tpg, | ||
1456 | TRANSPORT_TPG_TYPE_NORMAL); | ||
1457 | if (ret < 0) | 1468 | if (ret < 0) |
1458 | return NULL; | 1469 | return NULL; |
1459 | 1470 | ||
@@ -1872,6 +1883,20 @@ static int lio_tpg_check_prod_mode_write_protect( | |||
1872 | return tpg->tpg_attrib.prod_mode_write_protect; | 1883 | return tpg->tpg_attrib.prod_mode_write_protect; |
1873 | } | 1884 | } |
1874 | 1885 | ||
1886 | static int lio_tpg_check_prot_fabric_only( | ||
1887 | struct se_portal_group *se_tpg) | ||
1888 | { | ||
1889 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | ||
1890 | /* | ||
1891 | * Only report fabric_prot_type if t10_pi has also been enabled | ||
1892 | * for incoming ib_isert sessions. | ||
1893 | */ | ||
1894 | if (!tpg->tpg_attrib.t10_pi) | ||
1895 | return 0; | ||
1896 | |||
1897 | return tpg->tpg_attrib.fabric_prot_type; | ||
1898 | } | ||
1899 | |||
1875 | static void lio_tpg_release_fabric_acl( | 1900 | static void lio_tpg_release_fabric_acl( |
1876 | struct se_portal_group *se_tpg, | 1901 | struct se_portal_group *se_tpg, |
1877 | struct se_node_acl *se_acl) | 1902 | struct se_node_acl *se_acl) |
@@ -1953,115 +1978,60 @@ static void lio_release_cmd(struct se_cmd *se_cmd) | |||
1953 | iscsit_release_cmd(cmd); | 1978 | iscsit_release_cmd(cmd); |
1954 | } | 1979 | } |
1955 | 1980 | ||
1956 | /* End functions for target_core_fabric_ops */ | 1981 | const struct target_core_fabric_ops iscsi_ops = { |
1957 | 1982 | .module = THIS_MODULE, | |
1958 | int iscsi_target_register_configfs(void) | 1983 | .name = "iscsi", |
1959 | { | 1984 | .get_fabric_name = iscsi_get_fabric_name, |
1960 | struct target_fabric_configfs *fabric; | 1985 | .get_fabric_proto_ident = iscsi_get_fabric_proto_ident, |
1961 | int ret; | 1986 | .tpg_get_wwn = lio_tpg_get_endpoint_wwn, |
1962 | 1987 | .tpg_get_tag = lio_tpg_get_tag, | |
1963 | lio_target_fabric_configfs = NULL; | 1988 | .tpg_get_default_depth = lio_tpg_get_default_depth, |
1964 | fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi"); | 1989 | .tpg_get_pr_transport_id = iscsi_get_pr_transport_id, |
1965 | if (IS_ERR(fabric)) { | 1990 | .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len, |
1966 | pr_err("target_fabric_configfs_init() for" | 1991 | .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id, |
1967 | " LIO-Target failed!\n"); | 1992 | .tpg_check_demo_mode = lio_tpg_check_demo_mode, |
1968 | return PTR_ERR(fabric); | 1993 | .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache, |
1969 | } | 1994 | .tpg_check_demo_mode_write_protect = |
1970 | /* | 1995 | lio_tpg_check_demo_mode_write_protect, |
1971 | * Setup the fabric API of function pointers used by target_core_mod.. | 1996 | .tpg_check_prod_mode_write_protect = |
1972 | */ | 1997 | lio_tpg_check_prod_mode_write_protect, |
1973 | fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name; | 1998 | .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only, |
1974 | fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident; | 1999 | .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl, |
1975 | fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn; | 2000 | .tpg_release_fabric_acl = lio_tpg_release_fabric_acl, |
1976 | fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag; | 2001 | .tpg_get_inst_index = lio_tpg_get_inst_index, |
1977 | fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth; | 2002 | .check_stop_free = lio_check_stop_free, |
1978 | fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id; | 2003 | .release_cmd = lio_release_cmd, |
1979 | fabric->tf_ops.tpg_get_pr_transport_id_len = | 2004 | .shutdown_session = lio_tpg_shutdown_session, |
1980 | &iscsi_get_pr_transport_id_len; | 2005 | .close_session = lio_tpg_close_session, |
1981 | fabric->tf_ops.tpg_parse_pr_out_transport_id = | 2006 | .sess_get_index = lio_sess_get_index, |
1982 | &iscsi_parse_pr_out_transport_id; | 2007 | .sess_get_initiator_sid = lio_sess_get_initiator_sid, |
1983 | fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode; | 2008 | .write_pending = lio_write_pending, |
1984 | fabric->tf_ops.tpg_check_demo_mode_cache = | 2009 | .write_pending_status = lio_write_pending_status, |
1985 | &lio_tpg_check_demo_mode_cache; | 2010 | .set_default_node_attributes = lio_set_default_node_attributes, |
1986 | fabric->tf_ops.tpg_check_demo_mode_write_protect = | 2011 | .get_task_tag = iscsi_get_task_tag, |
1987 | &lio_tpg_check_demo_mode_write_protect; | 2012 | .get_cmd_state = iscsi_get_cmd_state, |
1988 | fabric->tf_ops.tpg_check_prod_mode_write_protect = | 2013 | .queue_data_in = lio_queue_data_in, |
1989 | &lio_tpg_check_prod_mode_write_protect; | 2014 | .queue_status = lio_queue_status, |
1990 | fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; | 2015 | .queue_tm_rsp = lio_queue_tm_rsp, |
1991 | fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; | 2016 | .aborted_task = lio_aborted_task, |
1992 | fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; | 2017 | .fabric_make_wwn = lio_target_call_coreaddtiqn, |
1993 | fabric->tf_ops.check_stop_free = &lio_check_stop_free, | 2018 | .fabric_drop_wwn = lio_target_call_coredeltiqn, |
1994 | fabric->tf_ops.release_cmd = &lio_release_cmd; | 2019 | .fabric_make_tpg = lio_target_tiqn_addtpg, |
1995 | fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; | 2020 | .fabric_drop_tpg = lio_target_tiqn_deltpg, |
1996 | fabric->tf_ops.close_session = &lio_tpg_close_session; | 2021 | .fabric_make_np = lio_target_call_addnptotpg, |
1997 | fabric->tf_ops.sess_get_index = &lio_sess_get_index; | 2022 | .fabric_drop_np = lio_target_call_delnpfromtpg, |
1998 | fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid; | 2023 | .fabric_make_nodeacl = lio_target_make_nodeacl, |
1999 | fabric->tf_ops.write_pending = &lio_write_pending; | 2024 | .fabric_drop_nodeacl = lio_target_drop_nodeacl, |
2000 | fabric->tf_ops.write_pending_status = &lio_write_pending_status; | 2025 | |
2001 | fabric->tf_ops.set_default_node_attributes = | 2026 | .tfc_discovery_attrs = lio_target_discovery_auth_attrs, |
2002 | &lio_set_default_node_attributes; | 2027 | .tfc_wwn_attrs = lio_target_wwn_attrs, |
2003 | fabric->tf_ops.get_task_tag = &iscsi_get_task_tag; | 2028 | .tfc_tpg_base_attrs = lio_target_tpg_attrs, |
2004 | fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state; | 2029 | .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs, |
2005 | fabric->tf_ops.queue_data_in = &lio_queue_data_in; | 2030 | .tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs, |
2006 | fabric->tf_ops.queue_status = &lio_queue_status; | 2031 | .tfc_tpg_param_attrs = lio_target_tpg_param_attrs, |
2007 | fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; | 2032 | .tfc_tpg_np_base_attrs = lio_target_portal_attrs, |
2008 | fabric->tf_ops.aborted_task = &lio_aborted_task; | 2033 | .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs, |
2009 | /* | 2034 | .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs, |
2010 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | 2035 | .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs, |
2011 | */ | 2036 | .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs, |
2012 | fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn; | 2037 | }; |
2013 | fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn; | ||
2014 | fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg; | ||
2015 | fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg; | ||
2016 | fabric->tf_ops.fabric_post_link = NULL; | ||
2017 | fabric->tf_ops.fabric_pre_unlink = NULL; | ||
2018 | fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg; | ||
2019 | fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg; | ||
2020 | fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl; | ||
2021 | fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl; | ||
2022 | /* | ||
2023 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2024 | * struct config_item_type's | ||
2025 | */ | ||
2026 | fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; | ||
2027 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; | ||
2028 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; | ||
2029 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; | ||
2030 | fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; | ||
2031 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; | ||
2032 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; | ||
2033 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; | ||
2034 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; | ||
2035 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; | ||
2036 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; | ||
2037 | |||
2038 | ret = target_fabric_configfs_register(fabric); | ||
2039 | if (ret < 0) { | ||
2040 | pr_err("target_fabric_configfs_register() for" | ||
2041 | " LIO-Target failed!\n"); | ||
2042 | target_fabric_configfs_free(fabric); | ||
2043 | return ret; | ||
2044 | } | ||
2045 | |||
2046 | lio_target_fabric_configfs = fabric; | ||
2047 | pr_debug("LIO_TARGET[0] - Set fabric ->" | ||
2048 | " lio_target_fabric_configfs\n"); | ||
2049 | return 0; | ||
2050 | } | ||
2051 | |||
2052 | |||
2053 | void iscsi_target_deregister_configfs(void) | ||
2054 | { | ||
2055 | if (!lio_target_fabric_configfs) | ||
2056 | return; | ||
2057 | /* | ||
2058 | * Shutdown discovery sessions and disable discovery TPG | ||
2059 | */ | ||
2060 | if (iscsit_global->discovery_tpg) | ||
2061 | iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); | ||
2062 | |||
2063 | target_fabric_configfs_deregister(lio_target_fabric_configfs); | ||
2064 | lio_target_fabric_configfs = NULL; | ||
2065 | pr_debug("LIO_TARGET[0] - Cleared" | ||
2066 | " lio_target_fabric_configfs\n"); | ||
2067 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h deleted file mode 100644 index 8cd5a63c4edc..000000000000 --- a/drivers/target/iscsi/iscsi_target_configfs.h +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | #ifndef ISCSI_TARGET_CONFIGFS_H | ||
2 | #define ISCSI_TARGET_CONFIGFS_H | ||
3 | |||
4 | extern int iscsi_target_register_configfs(void); | ||
5 | extern void iscsi_target_deregister_configfs(void); | ||
6 | |||
7 | #endif /* ISCSI_TARGET_CONFIGFS_H */ | ||
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index bdd8731a4daa..959a14c9dd5d 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <target/iscsi/iscsi_target_core.h> | 24 | #include <target/iscsi/iscsi_target_core.h> |
25 | #include "iscsi_target_seq_pdu_list.h" | 25 | #include "iscsi_target_seq_pdu_list.h" |
26 | #include "iscsi_target_tq.h" | ||
27 | #include "iscsi_target_erl0.h" | 26 | #include "iscsi_target_erl0.h" |
28 | #include "iscsi_target_erl1.h" | 27 | #include "iscsi_target_erl1.h" |
29 | #include "iscsi_target_erl2.h" | 28 | #include "iscsi_target_erl2.h" |
@@ -860,7 +859,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn) | |||
860 | } | 859 | } |
861 | spin_unlock_bh(&conn->state_lock); | 860 | spin_unlock_bh(&conn->state_lock); |
862 | 861 | ||
863 | iscsi_thread_set_force_reinstatement(conn); | 862 | if (conn->tx_thread && conn->tx_thread_active) |
863 | send_sig(SIGINT, conn->tx_thread, 1); | ||
864 | if (conn->rx_thread && conn->rx_thread_active) | ||
865 | send_sig(SIGINT, conn->rx_thread, 1); | ||
864 | 866 | ||
865 | sleep: | 867 | sleep: |
866 | wait_for_completion(&conn->conn_wait_rcfr_comp); | 868 | wait_for_completion(&conn->conn_wait_rcfr_comp); |
@@ -885,10 +887,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) | |||
885 | return; | 887 | return; |
886 | } | 888 | } |
887 | 889 | ||
888 | if (iscsi_thread_set_force_reinstatement(conn) < 0) { | 890 | if (conn->tx_thread && conn->tx_thread_active) |
889 | spin_unlock_bh(&conn->state_lock); | 891 | send_sig(SIGINT, conn->tx_thread, 1); |
890 | return; | 892 | if (conn->rx_thread && conn->rx_thread_active) |
891 | } | 893 | send_sig(SIGINT, conn->rx_thread, 1); |
892 | 894 | ||
893 | atomic_set(&conn->connection_reinstatement, 1); | 895 | atomic_set(&conn->connection_reinstatement, 1); |
894 | if (!sleep) { | 896 | if (!sleep) { |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 153fb66ac1b8..8ce94ff744e6 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <target/iscsi/iscsi_target_core.h> | 27 | #include <target/iscsi/iscsi_target_core.h> |
28 | #include <target/iscsi/iscsi_target_stat.h> | 28 | #include <target/iscsi/iscsi_target_stat.h> |
29 | #include "iscsi_target_tq.h" | ||
30 | #include "iscsi_target_device.h" | 29 | #include "iscsi_target_device.h" |
31 | #include "iscsi_target_nego.h" | 30 | #include "iscsi_target_nego.h" |
32 | #include "iscsi_target_erl0.h" | 31 | #include "iscsi_target_erl0.h" |
@@ -699,6 +698,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) | |||
699 | iscsit_start_nopin_timer(conn); | 698 | iscsit_start_nopin_timer(conn); |
700 | } | 699 | } |
701 | 700 | ||
701 | static int iscsit_start_kthreads(struct iscsi_conn *conn) | ||
702 | { | ||
703 | int ret = 0; | ||
704 | |||
705 | spin_lock(&iscsit_global->ts_bitmap_lock); | ||
706 | conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap, | ||
707 | ISCSIT_BITMAP_BITS, get_order(1)); | ||
708 | spin_unlock(&iscsit_global->ts_bitmap_lock); | ||
709 | |||
710 | if (conn->bitmap_id < 0) { | ||
711 | pr_err("bitmap_find_free_region() failed for" | ||
712 | " iscsit_start_kthreads()\n"); | ||
713 | return -ENOMEM; | ||
714 | } | ||
715 | |||
716 | conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn, | ||
717 | "%s", ISCSI_TX_THREAD_NAME); | ||
718 | if (IS_ERR(conn->tx_thread)) { | ||
719 | pr_err("Unable to start iscsi_target_tx_thread\n"); | ||
720 | ret = PTR_ERR(conn->tx_thread); | ||
721 | goto out_bitmap; | ||
722 | } | ||
723 | conn->tx_thread_active = true; | ||
724 | |||
725 | conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn, | ||
726 | "%s", ISCSI_RX_THREAD_NAME); | ||
727 | if (IS_ERR(conn->rx_thread)) { | ||
728 | pr_err("Unable to start iscsi_target_rx_thread\n"); | ||
729 | ret = PTR_ERR(conn->rx_thread); | ||
730 | goto out_tx; | ||
731 | } | ||
732 | conn->rx_thread_active = true; | ||
733 | |||
734 | return 0; | ||
735 | out_tx: | ||
736 | kthread_stop(conn->tx_thread); | ||
737 | conn->tx_thread_active = false; | ||
738 | out_bitmap: | ||
739 | spin_lock(&iscsit_global->ts_bitmap_lock); | ||
740 | bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, | ||
741 | get_order(1)); | ||
742 | spin_unlock(&iscsit_global->ts_bitmap_lock); | ||
743 | return ret; | ||
744 | } | ||
745 | |||
702 | int iscsi_post_login_handler( | 746 | int iscsi_post_login_handler( |
703 | struct iscsi_np *np, | 747 | struct iscsi_np *np, |
704 | struct iscsi_conn *conn, | 748 | struct iscsi_conn *conn, |
@@ -709,7 +753,7 @@ int iscsi_post_login_handler( | |||
709 | struct se_session *se_sess = sess->se_sess; | 753 | struct se_session *se_sess = sess->se_sess; |
710 | struct iscsi_portal_group *tpg = sess->tpg; | 754 | struct iscsi_portal_group *tpg = sess->tpg; |
711 | struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; | 755 | struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; |
712 | struct iscsi_thread_set *ts; | 756 | int rc; |
713 | 757 | ||
714 | iscsit_inc_conn_usage_count(conn); | 758 | iscsit_inc_conn_usage_count(conn); |
715 | 759 | ||
@@ -724,7 +768,6 @@ int iscsi_post_login_handler( | |||
724 | /* | 768 | /* |
725 | * SCSI Initiator -> SCSI Target Port Mapping | 769 | * SCSI Initiator -> SCSI Target Port Mapping |
726 | */ | 770 | */ |
727 | ts = iscsi_get_thread_set(); | ||
728 | if (!zero_tsih) { | 771 | if (!zero_tsih) { |
729 | iscsi_set_session_parameters(sess->sess_ops, | 772 | iscsi_set_session_parameters(sess->sess_ops, |
730 | conn->param_list, 0); | 773 | conn->param_list, 0); |
@@ -751,9 +794,11 @@ int iscsi_post_login_handler( | |||
751 | sess->sess_ops->InitiatorName); | 794 | sess->sess_ops->InitiatorName); |
752 | spin_unlock_bh(&sess->conn_lock); | 795 | spin_unlock_bh(&sess->conn_lock); |
753 | 796 | ||
754 | iscsi_post_login_start_timers(conn); | 797 | rc = iscsit_start_kthreads(conn); |
798 | if (rc) | ||
799 | return rc; | ||
755 | 800 | ||
756 | iscsi_activate_thread_set(conn, ts); | 801 | iscsi_post_login_start_timers(conn); |
757 | /* | 802 | /* |
758 | * Determine CPU mask to ensure connection's RX and TX kthreads | 803 | * Determine CPU mask to ensure connection's RX and TX kthreads |
759 | * are scheduled on the same CPU. | 804 | * are scheduled on the same CPU. |
@@ -810,8 +855,11 @@ int iscsi_post_login_handler( | |||
810 | " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); | 855 | " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); |
811 | spin_unlock_bh(&se_tpg->session_lock); | 856 | spin_unlock_bh(&se_tpg->session_lock); |
812 | 857 | ||
858 | rc = iscsit_start_kthreads(conn); | ||
859 | if (rc) | ||
860 | return rc; | ||
861 | |||
813 | iscsi_post_login_start_timers(conn); | 862 | iscsi_post_login_start_timers(conn); |
814 | iscsi_activate_thread_set(conn, ts); | ||
815 | /* | 863 | /* |
816 | * Determine CPU mask to ensure connection's RX and TX kthreads | 864 | * Determine CPU mask to ensure connection's RX and TX kthreads |
817 | * are scheduled on the same CPU. | 865 | * are scheduled on the same CPU. |
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index bdd127c0e3ae..e8a240818353 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
@@ -68,10 +68,8 @@ int iscsit_load_discovery_tpg(void) | |||
68 | return -1; | 68 | return -1; |
69 | } | 69 | } |
70 | 70 | ||
71 | ret = core_tpg_register( | 71 | ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg, |
72 | &lio_target_fabric_configfs->tf_ops, | 72 | tpg, TRANSPORT_TPG_TYPE_DISCOVERY); |
73 | NULL, &tpg->tpg_se_tpg, tpg, | ||
74 | TRANSPORT_TPG_TYPE_DISCOVERY); | ||
75 | if (ret < 0) { | 73 | if (ret < 0) { |
76 | kfree(tpg); | 74 | kfree(tpg); |
77 | return -1; | 75 | return -1; |
@@ -228,6 +226,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) | |||
228 | a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; | 226 | a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; |
229 | a->default_erl = TA_DEFAULT_ERL; | 227 | a->default_erl = TA_DEFAULT_ERL; |
230 | a->t10_pi = TA_DEFAULT_T10_PI; | 228 | a->t10_pi = TA_DEFAULT_T10_PI; |
229 | a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; | ||
231 | } | 230 | } |
232 | 231 | ||
233 | int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) | 232 | int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) |
@@ -878,3 +877,21 @@ int iscsit_ta_t10_pi( | |||
878 | 877 | ||
879 | return 0; | 878 | return 0; |
880 | } | 879 | } |
880 | |||
881 | int iscsit_ta_fabric_prot_type( | ||
882 | struct iscsi_portal_group *tpg, | ||
883 | u32 prot_type) | ||
884 | { | ||
885 | struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; | ||
886 | |||
887 | if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) { | ||
888 | pr_err("Illegal value for fabric_prot_type: %u\n", prot_type); | ||
889 | return -EINVAL; | ||
890 | } | ||
891 | |||
892 | a->fabric_prot_type = prot_type; | ||
893 | pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n", | ||
894 | tpg->tpgt, prot_type); | ||
895 | |||
896 | return 0; | ||
897 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index e7265337bc43..95ff5bdecd71 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h | |||
@@ -39,5 +39,6 @@ extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); | |||
39 | extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); | 39 | extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); |
40 | extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); | 40 | extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); |
41 | extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); | 41 | extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); |
42 | extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); | ||
42 | 43 | ||
43 | #endif /* ISCSI_TARGET_TPG_H */ | 44 | #endif /* ISCSI_TARGET_TPG_H */ |
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c deleted file mode 100644 index 26aa50996473..000000000000 --- a/drivers/target/iscsi/iscsi_target_tq.c +++ /dev/null | |||
@@ -1,495 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * This file contains the iSCSI Login Thread and Thread Queue functions. | ||
3 | * | ||
4 | * (c) Copyright 2007-2013 Datera, Inc. | ||
5 | * | ||
6 | * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | ******************************************************************************/ | ||
18 | |||
19 | #include <linux/kthread.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/bitmap.h> | ||
22 | |||
23 | #include <target/iscsi/iscsi_target_core.h> | ||
24 | #include "iscsi_target_tq.h" | ||
25 | #include "iscsi_target.h" | ||
26 | |||
27 | static LIST_HEAD(inactive_ts_list); | ||
28 | static DEFINE_SPINLOCK(inactive_ts_lock); | ||
29 | static DEFINE_SPINLOCK(ts_bitmap_lock); | ||
30 | |||
31 | static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) | ||
32 | { | ||
33 | if (!list_empty(&ts->ts_list)) { | ||
34 | WARN_ON(1); | ||
35 | return; | ||
36 | } | ||
37 | spin_lock(&inactive_ts_lock); | ||
38 | list_add_tail(&ts->ts_list, &inactive_ts_list); | ||
39 | iscsit_global->inactive_ts++; | ||
40 | spin_unlock(&inactive_ts_lock); | ||
41 | } | ||
42 | |||
43 | static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) | ||
44 | { | ||
45 | struct iscsi_thread_set *ts; | ||
46 | |||
47 | spin_lock(&inactive_ts_lock); | ||
48 | if (list_empty(&inactive_ts_list)) { | ||
49 | spin_unlock(&inactive_ts_lock); | ||
50 | return NULL; | ||
51 | } | ||
52 | |||
53 | ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); | ||
54 | |||
55 | list_del_init(&ts->ts_list); | ||
56 | iscsit_global->inactive_ts--; | ||
57 | spin_unlock(&inactive_ts_lock); | ||
58 | |||
59 | return ts; | ||
60 | } | ||
61 | |||
62 | int iscsi_allocate_thread_sets(u32 thread_pair_count) | ||
63 | { | ||
64 | int allocated_thread_pair_count = 0, i, thread_id; | ||
65 | struct iscsi_thread_set *ts = NULL; | ||
66 | |||
67 | for (i = 0; i < thread_pair_count; i++) { | ||
68 | ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL); | ||
69 | if (!ts) { | ||
70 | pr_err("Unable to allocate memory for" | ||
71 | " thread set.\n"); | ||
72 | return allocated_thread_pair_count; | ||
73 | } | ||
74 | /* | ||
75 | * Locate the next available region in the thread_set_bitmap | ||
76 | */ | ||
77 | spin_lock(&ts_bitmap_lock); | ||
78 | thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap, | ||
79 | iscsit_global->ts_bitmap_count, get_order(1)); | ||
80 | spin_unlock(&ts_bitmap_lock); | ||
81 | if (thread_id < 0) { | ||
82 | pr_err("bitmap_find_free_region() failed for" | ||
83 | " thread_set_bitmap\n"); | ||
84 | kfree(ts); | ||
85 | return allocated_thread_pair_count; | ||
86 | } | ||
87 | |||
88 | ts->thread_id = thread_id; | ||
89 | ts->status = ISCSI_THREAD_SET_FREE; | ||
90 | INIT_LIST_HEAD(&ts->ts_list); | ||
91 | spin_lock_init(&ts->ts_state_lock); | ||
92 | init_completion(&ts->rx_restart_comp); | ||
93 | init_completion(&ts->tx_restart_comp); | ||
94 | init_completion(&ts->rx_start_comp); | ||
95 | init_completion(&ts->tx_start_comp); | ||
96 | sema_init(&ts->ts_activate_sem, 0); | ||
97 | |||
98 | ts->create_threads = 1; | ||
99 | ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", | ||
100 | ISCSI_TX_THREAD_NAME); | ||
101 | if (IS_ERR(ts->tx_thread)) { | ||
102 | dump_stack(); | ||
103 | pr_err("Unable to start iscsi_target_tx_thread\n"); | ||
104 | break; | ||
105 | } | ||
106 | |||
107 | ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s", | ||
108 | ISCSI_RX_THREAD_NAME); | ||
109 | if (IS_ERR(ts->rx_thread)) { | ||
110 | kthread_stop(ts->tx_thread); | ||
111 | pr_err("Unable to start iscsi_target_rx_thread\n"); | ||
112 | break; | ||
113 | } | ||
114 | ts->create_threads = 0; | ||
115 | |||
116 | iscsi_add_ts_to_inactive_list(ts); | ||
117 | allocated_thread_pair_count++; | ||
118 | } | ||
119 | |||
120 | pr_debug("Spawned %d thread set(s) (%d total threads).\n", | ||
121 | allocated_thread_pair_count, allocated_thread_pair_count * 2); | ||
122 | return allocated_thread_pair_count; | ||
123 | } | ||
124 | |||
125 | static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts) | ||
126 | { | ||
127 | spin_lock_bh(&ts->ts_state_lock); | ||
128 | ts->status = ISCSI_THREAD_SET_DIE; | ||
129 | |||
130 | if (ts->rx_thread) { | ||
131 | complete(&ts->rx_start_comp); | ||
132 | spin_unlock_bh(&ts->ts_state_lock); | ||
133 | kthread_stop(ts->rx_thread); | ||
134 | spin_lock_bh(&ts->ts_state_lock); | ||
135 | } | ||
136 | if (ts->tx_thread) { | ||
137 | complete(&ts->tx_start_comp); | ||
138 | spin_unlock_bh(&ts->ts_state_lock); | ||
139 | kthread_stop(ts->tx_thread); | ||
140 | spin_lock_bh(&ts->ts_state_lock); | ||
141 | } | ||
142 | spin_unlock_bh(&ts->ts_state_lock); | ||
143 | /* | ||
144 | * Release this thread_id in the thread_set_bitmap | ||
145 | */ | ||
146 | spin_lock(&ts_bitmap_lock); | ||
147 | bitmap_release_region(iscsit_global->ts_bitmap, | ||
148 | ts->thread_id, get_order(1)); | ||
149 | spin_unlock(&ts_bitmap_lock); | ||
150 | |||
151 | kfree(ts); | ||
152 | } | ||
153 | |||
154 | void iscsi_deallocate_thread_sets(void) | ||
155 | { | ||
156 | struct iscsi_thread_set *ts = NULL; | ||
157 | u32 released_count = 0; | ||
158 | |||
159 | while ((ts = iscsi_get_ts_from_inactive_list())) { | ||
160 | |||
161 | iscsi_deallocate_thread_one(ts); | ||
162 | released_count++; | ||
163 | } | ||
164 | |||
165 | if (released_count) | ||
166 | pr_debug("Stopped %d thread set(s) (%d total threads)." | ||
167 | "\n", released_count, released_count * 2); | ||
168 | } | ||
169 | |||
170 | static void iscsi_deallocate_extra_thread_sets(void) | ||
171 | { | ||
172 | u32 orig_count, released_count = 0; | ||
173 | struct iscsi_thread_set *ts = NULL; | ||
174 | |||
175 | orig_count = TARGET_THREAD_SET_COUNT; | ||
176 | |||
177 | while ((iscsit_global->inactive_ts + 1) > orig_count) { | ||
178 | ts = iscsi_get_ts_from_inactive_list(); | ||
179 | if (!ts) | ||
180 | break; | ||
181 | |||
182 | iscsi_deallocate_thread_one(ts); | ||
183 | released_count++; | ||
184 | } | ||
185 | |||
186 | if (released_count) | ||
187 | pr_debug("Stopped %d thread set(s) (%d total threads)." | ||
188 | "\n", released_count, released_count * 2); | ||
189 | } | ||
190 | |||
191 | void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) | ||
192 | { | ||
193 | spin_lock_bh(&ts->ts_state_lock); | ||
194 | conn->thread_set = ts; | ||
195 | ts->conn = conn; | ||
196 | ts->status = ISCSI_THREAD_SET_ACTIVE; | ||
197 | spin_unlock_bh(&ts->ts_state_lock); | ||
198 | |||
199 | complete(&ts->rx_start_comp); | ||
200 | complete(&ts->tx_start_comp); | ||
201 | |||
202 | down(&ts->ts_activate_sem); | ||
203 | } | ||
204 | |||
205 | struct iscsi_thread_set *iscsi_get_thread_set(void) | ||
206 | { | ||
207 | struct iscsi_thread_set *ts; | ||
208 | |||
209 | get_set: | ||
210 | ts = iscsi_get_ts_from_inactive_list(); | ||
211 | if (!ts) { | ||
212 | iscsi_allocate_thread_sets(1); | ||
213 | goto get_set; | ||
214 | } | ||
215 | |||
216 | ts->delay_inactive = 1; | ||
217 | ts->signal_sent = 0; | ||
218 | ts->thread_count = 2; | ||
219 | init_completion(&ts->rx_restart_comp); | ||
220 | init_completion(&ts->tx_restart_comp); | ||
221 | sema_init(&ts->ts_activate_sem, 0); | ||
222 | |||
223 | return ts; | ||
224 | } | ||
225 | |||
226 | void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear) | ||
227 | { | ||
228 | struct iscsi_thread_set *ts = NULL; | ||
229 | |||
230 | if (!conn->thread_set) { | ||
231 | pr_err("struct iscsi_conn->thread_set is NULL\n"); | ||
232 | return; | ||
233 | } | ||
234 | ts = conn->thread_set; | ||
235 | |||
236 | spin_lock_bh(&ts->ts_state_lock); | ||
237 | ts->thread_clear &= ~thread_clear; | ||
238 | |||
239 | if ((thread_clear & ISCSI_CLEAR_RX_THREAD) && | ||
240 | (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD)) | ||
241 | complete(&ts->rx_restart_comp); | ||
242 | else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) && | ||
243 | (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD)) | ||
244 | complete(&ts->tx_restart_comp); | ||
245 | spin_unlock_bh(&ts->ts_state_lock); | ||
246 | } | ||
247 | |||
248 | void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent) | ||
249 | { | ||
250 | struct iscsi_thread_set *ts = NULL; | ||
251 | |||
252 | if (!conn->thread_set) { | ||
253 | pr_err("struct iscsi_conn->thread_set is NULL\n"); | ||
254 | return; | ||
255 | } | ||
256 | ts = conn->thread_set; | ||
257 | |||
258 | spin_lock_bh(&ts->ts_state_lock); | ||
259 | ts->signal_sent |= signal_sent; | ||
260 | spin_unlock_bh(&ts->ts_state_lock); | ||
261 | } | ||
262 | |||
263 | int iscsi_release_thread_set(struct iscsi_conn *conn) | ||
264 | { | ||
265 | int thread_called = 0; | ||
266 | struct iscsi_thread_set *ts = NULL; | ||
267 | |||
268 | if (!conn || !conn->thread_set) { | ||
269 | pr_err("connection or thread set pointer is NULL\n"); | ||
270 | BUG(); | ||
271 | } | ||
272 | ts = conn->thread_set; | ||
273 | |||
274 | spin_lock_bh(&ts->ts_state_lock); | ||
275 | ts->status = ISCSI_THREAD_SET_RESET; | ||
276 | |||
277 | if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME, | ||
278 | strlen(ISCSI_RX_THREAD_NAME))) | ||
279 | thread_called = ISCSI_RX_THREAD; | ||
280 | else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME, | ||
281 | strlen(ISCSI_TX_THREAD_NAME))) | ||
282 | thread_called = ISCSI_TX_THREAD; | ||
283 | |||
284 | if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) && | ||
285 | (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) { | ||
286 | |||
287 | if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) { | ||
288 | send_sig(SIGINT, ts->rx_thread, 1); | ||
289 | ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; | ||
290 | } | ||
291 | ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD; | ||
292 | spin_unlock_bh(&ts->ts_state_lock); | ||
293 | wait_for_completion(&ts->rx_restart_comp); | ||
294 | spin_lock_bh(&ts->ts_state_lock); | ||
295 | ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD; | ||
296 | } | ||
297 | if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) && | ||
298 | (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) { | ||
299 | |||
300 | if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) { | ||
301 | send_sig(SIGINT, ts->tx_thread, 1); | ||
302 | ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; | ||
303 | } | ||
304 | ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD; | ||
305 | spin_unlock_bh(&ts->ts_state_lock); | ||
306 | wait_for_completion(&ts->tx_restart_comp); | ||
307 | spin_lock_bh(&ts->ts_state_lock); | ||
308 | ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD; | ||
309 | } | ||
310 | |||
311 | ts->conn = NULL; | ||
312 | ts->status = ISCSI_THREAD_SET_FREE; | ||
313 | spin_unlock_bh(&ts->ts_state_lock); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn) | ||
319 | { | ||
320 | struct iscsi_thread_set *ts; | ||
321 | |||
322 | if (!conn->thread_set) | ||
323 | return -1; | ||
324 | ts = conn->thread_set; | ||
325 | |||
326 | spin_lock_bh(&ts->ts_state_lock); | ||
327 | if (ts->status != ISCSI_THREAD_SET_ACTIVE) { | ||
328 | spin_unlock_bh(&ts->ts_state_lock); | ||
329 | return -1; | ||
330 | } | ||
331 | |||
332 | if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) { | ||
333 | send_sig(SIGINT, ts->tx_thread, 1); | ||
334 | ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; | ||
335 | } | ||
336 | if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) { | ||
337 | send_sig(SIGINT, ts->rx_thread, 1); | ||
338 | ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; | ||
339 | } | ||
340 | spin_unlock_bh(&ts->ts_state_lock); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static void iscsi_check_to_add_additional_sets(void) | ||
346 | { | ||
347 | int thread_sets_add; | ||
348 | |||
349 | spin_lock(&inactive_ts_lock); | ||
350 | thread_sets_add = iscsit_global->inactive_ts; | ||
351 | spin_unlock(&inactive_ts_lock); | ||
352 | if (thread_sets_add == 1) | ||
353 | iscsi_allocate_thread_sets(1); | ||
354 | } | ||
355 | |||
356 | static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) | ||
357 | { | ||
358 | spin_lock_bh(&ts->ts_state_lock); | ||
359 | if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() || | ||
360 | signal_pending(current)) { | ||
361 | spin_unlock_bh(&ts->ts_state_lock); | ||
362 | return -1; | ||
363 | } | ||
364 | spin_unlock_bh(&ts->ts_state_lock); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
/*
 * Called by the iSCSI RX kthread at the top of its loop, before serving a
 * connection.  Performs post-use cleanup of the thread set, then blocks on
 * rx_start_comp until the set is (re)activated.
 *
 * Returns the connection this thread should service, or NULL when the
 * thread should exit (interrupted wait, set told to die, or no connection
 * attached).
 */
struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
{
	int ret;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->create_threads) {
		/*
		 * First call after kthread creation: nothing to tear down
		 * yet, go straight to waiting for activation.
		 */
		spin_unlock_bh(&ts->ts_state_lock);
		goto sleep;
	}

	/* Discard any stop signal from the previous use of this set. */
	if (ts->status != ISCSI_THREAD_SET_DIE)
		flush_signals(current);

	/*
	 * Last of the two threads to come back here returns the set to the
	 * inactive pool.  The lock must be dropped around the calls below,
	 * which take other locks / may sleep.
	 */
	if (ts->delay_inactive && (--ts->thread_count == 0)) {
		spin_unlock_bh(&ts->ts_state_lock);

		if (!iscsit_global->in_shutdown)
			iscsi_deallocate_extra_thread_sets();

		iscsi_add_ts_to_inactive_list(ts);
		spin_lock_bh(&ts->ts_state_lock);
	}

	/*
	 * A resetter may be blocked in wait_for_completion(&ts->rx_restart_comp)
	 * (see the RESET path that sets ISCSI_CLEAR_RX_THREAD); wake it.
	 */
	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
	    (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
		complete(&ts->rx_restart_comp);

	ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
sleep:
	/* Block until iscsi_activate_thread_set() completes rx_start_comp. */
	ret = wait_for_completion_interruptible(&ts->rx_start_comp);
	if (ret != 0)
		return NULL;

	if (iscsi_signal_thread_pre_handler(ts) < 0)
		return NULL;

	iscsi_check_to_add_additional_sets();

	spin_lock_bh(&ts->ts_state_lock);
	if (!ts->conn) {
		pr_err("struct iscsi_thread_set->conn is NULL for"
			" RX thread_id: %s/%d\n", current->comm, current->pid);
		spin_unlock_bh(&ts->ts_state_lock);
		return NULL;
	}
	/* Mark this thread attached before signalling the activator. */
	ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);

	up(&ts->ts_activate_sem);

	return ts->conn;
}
422 | |||
/*
 * Called by the iSCSI TX kthread at the top of its loop, before serving a
 * connection.  Mirror image of iscsi_rx_thread_pre_handler(): cleans up
 * after the previous use of the set, then blocks on tx_start_comp until
 * the set is (re)activated.
 *
 * Returns the connection this thread should service, or NULL when the
 * thread should exit.
 */
struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
{
	int ret;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->create_threads) {
		/* First call after kthread creation: nothing to tear down. */
		spin_unlock_bh(&ts->ts_state_lock);
		goto sleep;
	}

	/* Discard any stop signal from the previous use of this set. */
	if (ts->status != ISCSI_THREAD_SET_DIE)
		flush_signals(current);

	/*
	 * Last of the two threads back here returns the set to the inactive
	 * pool; drop the lock around the calls that take other locks.
	 */
	if (ts->delay_inactive && (--ts->thread_count == 0)) {
		spin_unlock_bh(&ts->ts_state_lock);

		if (!iscsit_global->in_shutdown)
			iscsi_deallocate_extra_thread_sets();

		iscsi_add_ts_to_inactive_list(ts);
		spin_lock_bh(&ts->ts_state_lock);
	}
	/*
	 * Wake a resetter blocked in wait_for_completion(&ts->tx_restart_comp)
	 * on the RESET path that set ISCSI_CLEAR_TX_THREAD.
	 */
	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
	    (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
		complete(&ts->tx_restart_comp);

	ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
sleep:
	/* Block until iscsi_activate_thread_set() completes tx_start_comp. */
	ret = wait_for_completion_interruptible(&ts->tx_start_comp);
	if (ret != 0)
		return NULL;

	if (iscsi_signal_thread_pre_handler(ts) < 0)
		return NULL;

	iscsi_check_to_add_additional_sets();

	spin_lock_bh(&ts->ts_state_lock);
	if (!ts->conn) {
		pr_err("struct iscsi_thread_set->conn is NULL for"
			" TX thread_id: %s/%d\n", current->comm, current->pid);
		spin_unlock_bh(&ts->ts_state_lock);
		return NULL;
	}
	/* Mark this thread attached before signalling the activator. */
	ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);

	up(&ts->ts_activate_sem);

	return ts->conn;
}
475 | |||
476 | int iscsi_thread_set_init(void) | ||
477 | { | ||
478 | int size; | ||
479 | |||
480 | iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS; | ||
481 | |||
482 | size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long); | ||
483 | iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL); | ||
484 | if (!iscsit_global->ts_bitmap) { | ||
485 | pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); | ||
486 | return -ENOMEM; | ||
487 | } | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
/*
 * Release the global thread-set ID bitmap allocated by
 * iscsi_thread_set_init().
 */
void iscsi_thread_set_free(void)
{
	kfree(iscsit_global->ts_bitmap);
}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h deleted file mode 100644 index cc1eede5ab3a..000000000000 --- a/drivers/target/iscsi/iscsi_target_tq.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
#ifndef ISCSI_THREAD_QUEUE_H
#define ISCSI_THREAD_QUEUE_H

/*
 * Defines for thread sets.
 */
extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
extern int iscsi_allocate_thread_sets(u32);
extern void iscsi_deallocate_thread_sets(void);
extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
extern struct iscsi_thread_set *iscsi_get_thread_set(void);
extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
extern int iscsi_release_thread_set(struct iscsi_conn *);
extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
extern int iscsi_thread_set_init(void);
extern void iscsi_thread_set_free(void);

extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);

#define TARGET_THREAD_SET_COUNT		4

/* Identifiers for which thread of a set is executing (current->comm match) */
#define ISCSI_RX_THREAD			1
#define ISCSI_TX_THREAD			2
#define ISCSI_RX_THREAD_NAME		"iscsi_trx"
#define ISCSI_TX_THREAD_NAME		"iscsi_ttx"
/* ->blocked_threads bits */
#define ISCSI_BLOCK_RX_THREAD		0x1
#define ISCSI_BLOCK_TX_THREAD		0x2
/* ->thread_clear bits */
#define ISCSI_CLEAR_RX_THREAD		0x1
#define ISCSI_CLEAR_TX_THREAD		0x2
/* ->signal_sent bits */
#define ISCSI_SIGNAL_RX_THREAD		0x1
#define ISCSI_SIGNAL_TX_THREAD		0x2

/* struct iscsi_thread_set->status */
#define ISCSI_THREAD_SET_FREE			1
#define ISCSI_THREAD_SET_ACTIVE			2
#define ISCSI_THREAD_SET_DIE			3
#define ISCSI_THREAD_SET_RESET			4
#define ISCSI_THREAD_SET_DEALLOCATE_THREADS	5

/* By default allow a maximum of 32K iSCSI connections */
#define ISCSI_TS_BITMAP_BITS	32768

struct iscsi_thread_set {
	/* flags used for blocking and restarting sets */
	int	blocked_threads;
	/* flag for creating threads */
	int	create_threads;
	/* flag for delaying re-adding to inactive list */
	int	delay_inactive;
	/* status for thread set */
	int	status;
	/* which threads have had signals sent */
	int	signal_sent;
	/* flag for which threads exited first */
	int	thread_clear;
	/* Active threads in the thread set */
	int	thread_count;
	/* Unique thread ID */
	u32	thread_id;
	/* pointer to connection if set is active */
	struct iscsi_conn	*conn;
	/* used for controlling ts state accesses */
	spinlock_t	ts_state_lock;
	/* used for restarting thread queue */
	struct completion	rx_restart_comp;
	/* used for restarting thread queue */
	struct completion	tx_restart_comp;
	/* used for normal unused blocking */
	struct completion	rx_start_comp;
	/* used for normal unused blocking */
	struct completion	tx_start_comp;
	/* OS descriptor for rx thread */
	struct task_struct	*rx_thread;
	/* OS descriptor for tx thread */
	struct task_struct	*tx_thread;
	/* list head for linking into the global thread-set lists */
	struct list_head	ts_list;
	/* up()'d by the rx/tx pre-handlers once each thread has attached
	 * to the connection; presumably down()'d by the activator —
	 * NOTE(review): confirm against iscsi_activate_thread_set() */
	struct semaphore	ts_activate_sem;
};

#endif   /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 390df8ed72b2..b18edda3e8af 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include "iscsi_target_erl1.h" | 33 | #include "iscsi_target_erl1.h" |
34 | #include "iscsi_target_erl2.h" | 34 | #include "iscsi_target_erl2.h" |
35 | #include "iscsi_target_tpg.h" | 35 | #include "iscsi_target_tpg.h" |
36 | #include "iscsi_target_tq.h" | ||
37 | #include "iscsi_target_util.h" | 36 | #include "iscsi_target_util.h" |
38 | #include "iscsi_target.h" | 37 | #include "iscsi_target.h" |
39 | 38 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index c36bd7c29136..51f0c895c6a5 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -41,8 +41,7 @@ | |||
41 | 41 | ||
42 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) | 42 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) |
43 | 43 | ||
44 | /* Local pointer to allocated TCM configfs fabric module */ | 44 | static const struct target_core_fabric_ops loop_ops; |
45 | static struct target_fabric_configfs *tcm_loop_fabric_configfs; | ||
46 | 45 | ||
47 | static struct workqueue_struct *tcm_loop_workqueue; | 46 | static struct workqueue_struct *tcm_loop_workqueue; |
48 | static struct kmem_cache *tcm_loop_cmd_cache; | 47 | static struct kmem_cache *tcm_loop_cmd_cache; |
@@ -108,7 +107,7 @@ static struct device_driver tcm_loop_driverfs = { | |||
108 | /* | 107 | /* |
109 | * Used with root_device_register() in tcm_loop_alloc_core_bus() below | 108 | * Used with root_device_register() in tcm_loop_alloc_core_bus() below |
110 | */ | 109 | */ |
111 | struct device *tcm_loop_primary; | 110 | static struct device *tcm_loop_primary; |
112 | 111 | ||
113 | static void tcm_loop_submission_work(struct work_struct *work) | 112 | static void tcm_loop_submission_work(struct work_struct *work) |
114 | { | 113 | { |
@@ -697,6 +696,13 @@ static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg | |||
697 | return 0; | 696 | return 0; |
698 | } | 697 | } |
699 | 698 | ||
699 | static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) | ||
700 | { | ||
701 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, | ||
702 | tl_se_tpg); | ||
703 | return tl_tpg->tl_fabric_prot_type; | ||
704 | } | ||
705 | |||
700 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( | 706 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( |
701 | struct se_portal_group *se_tpg) | 707 | struct se_portal_group *se_tpg) |
702 | { | 708 | { |
@@ -912,6 +918,46 @@ static void tcm_loop_port_unlink( | |||
912 | 918 | ||
913 | /* End items for tcm_loop_port_cit */ | 919 | /* End items for tcm_loop_port_cit */ |
914 | 920 | ||
921 | static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type( | ||
922 | struct se_portal_group *se_tpg, | ||
923 | char *page) | ||
924 | { | ||
925 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, | ||
926 | tl_se_tpg); | ||
927 | |||
928 | return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type); | ||
929 | } | ||
930 | |||
931 | static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type( | ||
932 | struct se_portal_group *se_tpg, | ||
933 | const char *page, | ||
934 | size_t count) | ||
935 | { | ||
936 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, | ||
937 | tl_se_tpg); | ||
938 | unsigned long val; | ||
939 | int ret = kstrtoul(page, 0, &val); | ||
940 | |||
941 | if (ret) { | ||
942 | pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); | ||
943 | return ret; | ||
944 | } | ||
945 | if (val != 0 && val != 1 && val != 3) { | ||
946 | pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); | ||
947 | return -EINVAL; | ||
948 | } | ||
949 | tl_tpg->tl_fabric_prot_type = val; | ||
950 | |||
951 | return count; | ||
952 | } | ||
953 | |||
954 | TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR); | ||
955 | |||
956 | static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = { | ||
957 | &tcm_loop_tpg_attrib_fabric_prot_type.attr, | ||
958 | NULL, | ||
959 | }; | ||
960 | |||
915 | /* Start items for tcm_loop_nexus_cit */ | 961 | /* Start items for tcm_loop_nexus_cit */ |
916 | 962 | ||
917 | static int tcm_loop_make_nexus( | 963 | static int tcm_loop_make_nexus( |
@@ -937,7 +983,8 @@ static int tcm_loop_make_nexus( | |||
937 | /* | 983 | /* |
938 | * Initialize the struct se_session pointer | 984 | * Initialize the struct se_session pointer |
939 | */ | 985 | */ |
940 | tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL); | 986 | tl_nexus->se_sess = transport_init_session( |
987 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); | ||
941 | if (IS_ERR(tl_nexus->se_sess)) { | 988 | if (IS_ERR(tl_nexus->se_sess)) { |
942 | ret = PTR_ERR(tl_nexus->se_sess); | 989 | ret = PTR_ERR(tl_nexus->se_sess); |
943 | goto out; | 990 | goto out; |
@@ -1165,21 +1212,19 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( | |||
1165 | struct tcm_loop_hba *tl_hba = container_of(wwn, | 1212 | struct tcm_loop_hba *tl_hba = container_of(wwn, |
1166 | struct tcm_loop_hba, tl_hba_wwn); | 1213 | struct tcm_loop_hba, tl_hba_wwn); |
1167 | struct tcm_loop_tpg *tl_tpg; | 1214 | struct tcm_loop_tpg *tl_tpg; |
1168 | char *tpgt_str, *end_ptr; | ||
1169 | int ret; | 1215 | int ret; |
1170 | unsigned short int tpgt; | 1216 | unsigned long tpgt; |
1171 | 1217 | ||
1172 | tpgt_str = strstr(name, "tpgt_"); | 1218 | if (strstr(name, "tpgt_") != name) { |
1173 | if (!tpgt_str) { | ||
1174 | pr_err("Unable to locate \"tpgt_#\" directory" | 1219 | pr_err("Unable to locate \"tpgt_#\" directory" |
1175 | " group\n"); | 1220 | " group\n"); |
1176 | return ERR_PTR(-EINVAL); | 1221 | return ERR_PTR(-EINVAL); |
1177 | } | 1222 | } |
1178 | tpgt_str += 5; /* Skip ahead of "tpgt_" */ | 1223 | if (kstrtoul(name+5, 10, &tpgt)) |
1179 | tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); | 1224 | return ERR_PTR(-EINVAL); |
1180 | 1225 | ||
1181 | if (tpgt >= TL_TPGS_PER_HBA) { | 1226 | if (tpgt >= TL_TPGS_PER_HBA) { |
1182 | pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" | 1227 | pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:" |
1183 | " %u\n", tpgt, TL_TPGS_PER_HBA); | 1228 | " %u\n", tpgt, TL_TPGS_PER_HBA); |
1184 | return ERR_PTR(-EINVAL); | 1229 | return ERR_PTR(-EINVAL); |
1185 | } | 1230 | } |
@@ -1189,14 +1234,13 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( | |||
1189 | /* | 1234 | /* |
1190 | * Register the tl_tpg as a emulated SAS TCM Target Endpoint | 1235 | * Register the tl_tpg as a emulated SAS TCM Target Endpoint |
1191 | */ | 1236 | */ |
1192 | ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, | 1237 | ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg, |
1193 | wwn, &tl_tpg->tl_se_tpg, tl_tpg, | ||
1194 | TRANSPORT_TPG_TYPE_NORMAL); | 1238 | TRANSPORT_TPG_TYPE_NORMAL); |
1195 | if (ret < 0) | 1239 | if (ret < 0) |
1196 | return ERR_PTR(-ENOMEM); | 1240 | return ERR_PTR(-ENOMEM); |
1197 | 1241 | ||
1198 | pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" | 1242 | pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" |
1199 | " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), | 1243 | " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba), |
1200 | config_item_name(&wwn->wwn_group.cg_item), tpgt); | 1244 | config_item_name(&wwn->wwn_group.cg_item), tpgt); |
1201 | 1245 | ||
1202 | return &tl_tpg->tl_se_tpg; | 1246 | return &tl_tpg->tl_se_tpg; |
@@ -1338,127 +1382,51 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = { | |||
1338 | 1382 | ||
1339 | /* End items for tcm_loop_cit */ | 1383 | /* End items for tcm_loop_cit */ |
1340 | 1384 | ||
1341 | static int tcm_loop_register_configfs(void) | 1385 | static const struct target_core_fabric_ops loop_ops = { |
1342 | { | 1386 | .module = THIS_MODULE, |
1343 | struct target_fabric_configfs *fabric; | 1387 | .name = "loopback", |
1344 | int ret; | 1388 | .get_fabric_name = tcm_loop_get_fabric_name, |
1345 | /* | 1389 | .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident, |
1346 | * Set the TCM Loop HBA counter to zero | 1390 | .tpg_get_wwn = tcm_loop_get_endpoint_wwn, |
1347 | */ | 1391 | .tpg_get_tag = tcm_loop_get_tag, |
1348 | tcm_loop_hba_no_cnt = 0; | 1392 | .tpg_get_default_depth = tcm_loop_get_default_depth, |
1349 | /* | 1393 | .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id, |
1350 | * Register the top level struct config_item_type with TCM core | 1394 | .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len, |
1351 | */ | 1395 | .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id, |
1352 | fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); | 1396 | .tpg_check_demo_mode = tcm_loop_check_demo_mode, |
1353 | if (IS_ERR(fabric)) { | 1397 | .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, |
1354 | pr_err("tcm_loop_register_configfs() failed!\n"); | 1398 | .tpg_check_demo_mode_write_protect = |
1355 | return PTR_ERR(fabric); | 1399 | tcm_loop_check_demo_mode_write_protect, |
1356 | } | 1400 | .tpg_check_prod_mode_write_protect = |
1357 | /* | 1401 | tcm_loop_check_prod_mode_write_protect, |
1358 | * Setup the fabric API of function pointers used by target_core_mod | 1402 | .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, |
1359 | */ | 1403 | .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl, |
1360 | fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; | 1404 | .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl, |
1361 | fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; | 1405 | .tpg_get_inst_index = tcm_loop_get_inst_index, |
1362 | fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; | 1406 | .check_stop_free = tcm_loop_check_stop_free, |
1363 | fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; | 1407 | .release_cmd = tcm_loop_release_cmd, |
1364 | fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; | 1408 | .shutdown_session = tcm_loop_shutdown_session, |
1365 | fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; | 1409 | .close_session = tcm_loop_close_session, |
1366 | fabric->tf_ops.tpg_get_pr_transport_id_len = | 1410 | .sess_get_index = tcm_loop_sess_get_index, |
1367 | &tcm_loop_get_pr_transport_id_len; | 1411 | .write_pending = tcm_loop_write_pending, |
1368 | fabric->tf_ops.tpg_parse_pr_out_transport_id = | 1412 | .write_pending_status = tcm_loop_write_pending_status, |
1369 | &tcm_loop_parse_pr_out_transport_id; | 1413 | .set_default_node_attributes = tcm_loop_set_default_node_attributes, |
1370 | fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; | 1414 | .get_task_tag = tcm_loop_get_task_tag, |
1371 | fabric->tf_ops.tpg_check_demo_mode_cache = | 1415 | .get_cmd_state = tcm_loop_get_cmd_state, |
1372 | &tcm_loop_check_demo_mode_cache; | 1416 | .queue_data_in = tcm_loop_queue_data_in, |
1373 | fabric->tf_ops.tpg_check_demo_mode_write_protect = | 1417 | .queue_status = tcm_loop_queue_status, |
1374 | &tcm_loop_check_demo_mode_write_protect; | 1418 | .queue_tm_rsp = tcm_loop_queue_tm_rsp, |
1375 | fabric->tf_ops.tpg_check_prod_mode_write_protect = | 1419 | .aborted_task = tcm_loop_aborted_task, |
1376 | &tcm_loop_check_prod_mode_write_protect; | 1420 | .fabric_make_wwn = tcm_loop_make_scsi_hba, |
1377 | /* | 1421 | .fabric_drop_wwn = tcm_loop_drop_scsi_hba, |
1378 | * The TCM loopback fabric module runs in demo-mode to a local | 1422 | .fabric_make_tpg = tcm_loop_make_naa_tpg, |
1379 | * virtual SCSI device, so fabric dependent initator ACLs are | 1423 | .fabric_drop_tpg = tcm_loop_drop_naa_tpg, |
1380 | * not required. | 1424 | .fabric_post_link = tcm_loop_port_link, |
1381 | */ | 1425 | .fabric_pre_unlink = tcm_loop_port_unlink, |
1382 | fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; | 1426 | .tfc_wwn_attrs = tcm_loop_wwn_attrs, |
1383 | fabric->tf_ops.tpg_release_fabric_acl = | 1427 | .tfc_tpg_base_attrs = tcm_loop_tpg_attrs, |
1384 | &tcm_loop_tpg_release_fabric_acl; | 1428 | .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs, |
1385 | fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; | 1429 | }; |
1386 | /* | ||
1387 | * Used for setting up remaining TCM resources in process context | ||
1388 | */ | ||
1389 | fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; | ||
1390 | fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; | ||
1391 | fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; | ||
1392 | fabric->tf_ops.close_session = &tcm_loop_close_session; | ||
1393 | fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; | ||
1394 | fabric->tf_ops.sess_get_initiator_sid = NULL; | ||
1395 | fabric->tf_ops.write_pending = &tcm_loop_write_pending; | ||
1396 | fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; | ||
1397 | /* | ||
1398 | * Not used for TCM loopback | ||
1399 | */ | ||
1400 | fabric->tf_ops.set_default_node_attributes = | ||
1401 | &tcm_loop_set_default_node_attributes; | ||
1402 | fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; | ||
1403 | fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; | ||
1404 | fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; | ||
1405 | fabric->tf_ops.queue_status = &tcm_loop_queue_status; | ||
1406 | fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; | ||
1407 | fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; | ||
1408 | |||
1409 | /* | ||
1410 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | ||
1411 | */ | ||
1412 | fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; | ||
1413 | fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; | ||
1414 | fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; | ||
1415 | fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; | ||
1416 | /* | ||
1417 | * fabric_post_link() and fabric_pre_unlink() are used for | ||
1418 | * registration and release of TCM Loop Virtual SCSI LUNs. | ||
1419 | */ | ||
1420 | fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; | ||
1421 | fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; | ||
1422 | fabric->tf_ops.fabric_make_np = NULL; | ||
1423 | fabric->tf_ops.fabric_drop_np = NULL; | ||
1424 | /* | ||
1425 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
1426 | */ | ||
1427 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; | ||
1428 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; | ||
1429 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
1430 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
1431 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1432 | /* | ||
1433 | * Once fabric->tf_ops has been setup, now register the fabric for | ||
1434 | * use within TCM | ||
1435 | */ | ||
1436 | ret = target_fabric_configfs_register(fabric); | ||
1437 | if (ret < 0) { | ||
1438 | pr_err("target_fabric_configfs_register() for" | ||
1439 | " TCM_Loop failed!\n"); | ||
1440 | target_fabric_configfs_free(fabric); | ||
1441 | return -1; | ||
1442 | } | ||
1443 | /* | ||
1444 | * Setup our local pointer to *fabric. | ||
1445 | */ | ||
1446 | tcm_loop_fabric_configfs = fabric; | ||
1447 | pr_debug("TCM_LOOP[0] - Set fabric ->" | ||
1448 | " tcm_loop_fabric_configfs\n"); | ||
1449 | return 0; | ||
1450 | } | ||
1451 | |||
1452 | static void tcm_loop_deregister_configfs(void) | ||
1453 | { | ||
1454 | if (!tcm_loop_fabric_configfs) | ||
1455 | return; | ||
1456 | |||
1457 | target_fabric_configfs_deregister(tcm_loop_fabric_configfs); | ||
1458 | tcm_loop_fabric_configfs = NULL; | ||
1459 | pr_debug("TCM_LOOP[0] - Cleared" | ||
1460 | " tcm_loop_fabric_configfs\n"); | ||
1461 | } | ||
1462 | 1430 | ||
1463 | static int __init tcm_loop_fabric_init(void) | 1431 | static int __init tcm_loop_fabric_init(void) |
1464 | { | 1432 | { |
@@ -1482,7 +1450,7 @@ static int __init tcm_loop_fabric_init(void) | |||
1482 | if (ret) | 1450 | if (ret) |
1483 | goto out_destroy_cache; | 1451 | goto out_destroy_cache; |
1484 | 1452 | ||
1485 | ret = tcm_loop_register_configfs(); | 1453 | ret = target_register_template(&loop_ops); |
1486 | if (ret) | 1454 | if (ret) |
1487 | goto out_release_core_bus; | 1455 | goto out_release_core_bus; |
1488 | 1456 | ||
@@ -1500,7 +1468,7 @@ out: | |||
1500 | 1468 | ||
1501 | static void __exit tcm_loop_fabric_exit(void) | 1469 | static void __exit tcm_loop_fabric_exit(void) |
1502 | { | 1470 | { |
1503 | tcm_loop_deregister_configfs(); | 1471 | target_unregister_template(&loop_ops); |
1504 | tcm_loop_release_core_bus(); | 1472 | tcm_loop_release_core_bus(); |
1505 | kmem_cache_destroy(tcm_loop_cmd_cache); | 1473 | kmem_cache_destroy(tcm_loop_cmd_cache); |
1506 | destroy_workqueue(tcm_loop_workqueue); | 1474 | destroy_workqueue(tcm_loop_workqueue); |
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 6ae49f272ba6..1e72ff77cac9 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -43,6 +43,7 @@ struct tcm_loop_nacl { | |||
43 | struct tcm_loop_tpg { | 43 | struct tcm_loop_tpg { |
44 | unsigned short tl_tpgt; | 44 | unsigned short tl_tpgt; |
45 | unsigned short tl_transport_status; | 45 | unsigned short tl_transport_status; |
46 | enum target_prot_type tl_fabric_prot_type; | ||
46 | atomic_t tl_tpg_port_count; | 47 | atomic_t tl_tpg_port_count; |
47 | struct se_portal_group tl_se_tpg; | 48 | struct se_portal_group tl_se_tpg; |
48 | struct tcm_loop_hba *tl_hba; | 49 | struct tcm_loop_hba *tl_hba; |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 9512af6a8114..18b0f9703ff2 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
@@ -42,8 +42,7 @@ | |||
42 | 42 | ||
43 | #include "sbp_target.h" | 43 | #include "sbp_target.h" |
44 | 44 | ||
45 | /* Local pointer to allocated TCM configfs fabric module */ | 45 | static const struct target_core_fabric_ops sbp_ops; |
46 | static struct target_fabric_configfs *sbp_fabric_configfs; | ||
47 | 46 | ||
48 | /* FireWire address region for management and command block address handlers */ | 47 | /* FireWire address region for management and command block address handlers */ |
49 | static const struct fw_address_region sbp_register_region = { | 48 | static const struct fw_address_region sbp_register_region = { |
@@ -2215,8 +2214,7 @@ static struct se_portal_group *sbp_make_tpg( | |||
2215 | goto out_free_tpg; | 2214 | goto out_free_tpg; |
2216 | } | 2215 | } |
2217 | 2216 | ||
2218 | ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn, | 2217 | ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg, |
2219 | &tpg->se_tpg, (void *)tpg, | ||
2220 | TRANSPORT_TPG_TYPE_NORMAL); | 2218 | TRANSPORT_TPG_TYPE_NORMAL); |
2221 | if (ret < 0) | 2219 | if (ret < 0) |
2222 | goto out_unreg_mgt_agt; | 2220 | goto out_unreg_mgt_agt; |
@@ -2503,7 +2501,9 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { | |||
2503 | NULL, | 2501 | NULL, |
2504 | }; | 2502 | }; |
2505 | 2503 | ||
2506 | static struct target_core_fabric_ops sbp_ops = { | 2504 | static const struct target_core_fabric_ops sbp_ops = { |
2505 | .module = THIS_MODULE, | ||
2506 | .name = "sbp", | ||
2507 | .get_fabric_name = sbp_get_fabric_name, | 2507 | .get_fabric_name = sbp_get_fabric_name, |
2508 | .get_fabric_proto_ident = sbp_get_fabric_proto_ident, | 2508 | .get_fabric_proto_ident = sbp_get_fabric_proto_ident, |
2509 | .tpg_get_wwn = sbp_get_fabric_wwn, | 2509 | .tpg_get_wwn = sbp_get_fabric_wwn, |
@@ -2544,68 +2544,20 @@ static struct target_core_fabric_ops sbp_ops = { | |||
2544 | .fabric_drop_np = NULL, | 2544 | .fabric_drop_np = NULL, |
2545 | .fabric_make_nodeacl = sbp_make_nodeacl, | 2545 | .fabric_make_nodeacl = sbp_make_nodeacl, |
2546 | .fabric_drop_nodeacl = sbp_drop_nodeacl, | 2546 | .fabric_drop_nodeacl = sbp_drop_nodeacl, |
2547 | }; | ||
2548 | |||
2549 | static int sbp_register_configfs(void) | ||
2550 | { | ||
2551 | struct target_fabric_configfs *fabric; | ||
2552 | int ret; | ||
2553 | |||
2554 | fabric = target_fabric_configfs_init(THIS_MODULE, "sbp"); | ||
2555 | if (IS_ERR(fabric)) { | ||
2556 | pr_err("target_fabric_configfs_init() failed\n"); | ||
2557 | return PTR_ERR(fabric); | ||
2558 | } | ||
2559 | |||
2560 | fabric->tf_ops = sbp_ops; | ||
2561 | |||
2562 | /* | ||
2563 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2564 | */ | ||
2565 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; | ||
2566 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; | ||
2567 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; | ||
2568 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
2569 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
2570 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
2571 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
2572 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
2573 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
2574 | |||
2575 | ret = target_fabric_configfs_register(fabric); | ||
2576 | if (ret < 0) { | ||
2577 | pr_err("target_fabric_configfs_register() failed for SBP\n"); | ||
2578 | return ret; | ||
2579 | } | ||
2580 | 2547 | ||
2581 | sbp_fabric_configfs = fabric; | 2548 | .tfc_wwn_attrs = sbp_wwn_attrs, |
2582 | 2549 | .tfc_tpg_base_attrs = sbp_tpg_base_attrs, | |
2583 | return 0; | 2550 | .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, |
2584 | }; | ||
2585 | |||
2586 | static void sbp_deregister_configfs(void) | ||
2587 | { | ||
2588 | if (!sbp_fabric_configfs) | ||
2589 | return; | ||
2590 | |||
2591 | target_fabric_configfs_deregister(sbp_fabric_configfs); | ||
2592 | sbp_fabric_configfs = NULL; | ||
2593 | }; | 2551 | }; |
2594 | 2552 | ||
2595 | static int __init sbp_init(void) | 2553 | static int __init sbp_init(void) |
2596 | { | 2554 | { |
2597 | int ret; | 2555 | return target_register_template(&sbp_ops); |
2598 | |||
2599 | ret = sbp_register_configfs(); | ||
2600 | if (ret < 0) | ||
2601 | return ret; | ||
2602 | |||
2603 | return 0; | ||
2604 | }; | 2556 | }; |
2605 | 2557 | ||
2606 | static void __exit sbp_exit(void) | 2558 | static void __exit sbp_exit(void) |
2607 | { | 2559 | { |
2608 | sbp_deregister_configfs(); | 2560 | target_unregister_template(&sbp_ops); |
2609 | }; | 2561 | }; |
2610 | 2562 | ||
2611 | MODULE_DESCRIPTION("FireWire SBP fabric driver"); | 2563 | MODULE_DESCRIPTION("FireWire SBP fabric driver"); |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 75d89adfccc0..ddaf76a4ac2a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -142,8 +142,8 @@ static struct config_group *target_core_register_fabric( | |||
142 | 142 | ||
143 | tf = target_core_get_fabric(name); | 143 | tf = target_core_get_fabric(name); |
144 | if (!tf) { | 144 | if (!tf) { |
145 | pr_err("target_core_register_fabric() trying autoload for %s\n", | 145 | pr_debug("target_core_register_fabric() trying autoload for %s\n", |
146 | name); | 146 | name); |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * Below are some hardcoded request_module() calls to automatically | 149 | * Below are some hardcoded request_module() calls to automatically |
@@ -165,8 +165,8 @@ static struct config_group *target_core_register_fabric( | |||
165 | */ | 165 | */ |
166 | ret = request_module("iscsi_target_mod"); | 166 | ret = request_module("iscsi_target_mod"); |
167 | if (ret < 0) { | 167 | if (ret < 0) { |
168 | pr_err("request_module() failed for" | 168 | pr_debug("request_module() failed for" |
169 | " iscsi_target_mod.ko: %d\n", ret); | 169 | " iscsi_target_mod.ko: %d\n", ret); |
170 | return ERR_PTR(-EINVAL); | 170 | return ERR_PTR(-EINVAL); |
171 | } | 171 | } |
172 | } else if (!strncmp(name, "loopback", 8)) { | 172 | } else if (!strncmp(name, "loopback", 8)) { |
@@ -178,8 +178,8 @@ static struct config_group *target_core_register_fabric( | |||
178 | */ | 178 | */ |
179 | ret = request_module("tcm_loop"); | 179 | ret = request_module("tcm_loop"); |
180 | if (ret < 0) { | 180 | if (ret < 0) { |
181 | pr_err("request_module() failed for" | 181 | pr_debug("request_module() failed for" |
182 | " tcm_loop.ko: %d\n", ret); | 182 | " tcm_loop.ko: %d\n", ret); |
183 | return ERR_PTR(-EINVAL); | 183 | return ERR_PTR(-EINVAL); |
184 | } | 184 | } |
185 | } | 185 | } |
@@ -188,8 +188,8 @@ static struct config_group *target_core_register_fabric( | |||
188 | } | 188 | } |
189 | 189 | ||
190 | if (!tf) { | 190 | if (!tf) { |
191 | pr_err("target_core_get_fabric() failed for %s\n", | 191 | pr_debug("target_core_get_fabric() failed for %s\n", |
192 | name); | 192 | name); |
193 | return ERR_PTR(-EINVAL); | 193 | return ERR_PTR(-EINVAL); |
194 | } | 194 | } |
195 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" | 195 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" |
@@ -300,81 +300,17 @@ struct configfs_subsystem *target_core_subsystem[] = { | |||
300 | // Start functions called by external Target Fabrics Modules | 300 | // Start functions called by external Target Fabrics Modules |
301 | //############################################################################*/ | 301 | //############################################################################*/ |
302 | 302 | ||
303 | /* | 303 | static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) |
304 | * First function called by fabric modules to: | ||
305 | * | ||
306 | * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. | ||
307 | * 2) Add struct target_fabric_configfs to g_tf_list | ||
308 | * 3) Return struct target_fabric_configfs to fabric module to be passed | ||
309 | * into target_fabric_configfs_register(). | ||
310 | */ | ||
311 | struct target_fabric_configfs *target_fabric_configfs_init( | ||
312 | struct module *fabric_mod, | ||
313 | const char *name) | ||
314 | { | 304 | { |
315 | struct target_fabric_configfs *tf; | 305 | if (!tfo->name) { |
316 | 306 | pr_err("Missing tfo->name\n"); | |
317 | if (!(name)) { | 307 | return -EINVAL; |
318 | pr_err("Unable to locate passed fabric name\n"); | ||
319 | return ERR_PTR(-EINVAL); | ||
320 | } | 308 | } |
321 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { | 309 | if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) { |
322 | pr_err("Passed name: %s exceeds TARGET_FABRIC" | 310 | pr_err("Passed name: %s exceeds TARGET_FABRIC" |
323 | "_NAME_SIZE\n", name); | 311 | "_NAME_SIZE\n", tfo->name); |
324 | return ERR_PTR(-EINVAL); | 312 | return -EINVAL; |
325 | } | 313 | } |
326 | |||
327 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | ||
328 | if (!tf) | ||
329 | return ERR_PTR(-ENOMEM); | ||
330 | |||
331 | INIT_LIST_HEAD(&tf->tf_list); | ||
332 | atomic_set(&tf->tf_access_cnt, 0); | ||
333 | /* | ||
334 | * Setup the default generic struct config_item_type's (cits) in | ||
335 | * struct target_fabric_configfs->tf_cit_tmpl | ||
336 | */ | ||
337 | tf->tf_module = fabric_mod; | ||
338 | target_fabric_setup_cits(tf); | ||
339 | |||
340 | tf->tf_subsys = target_core_subsystem[0]; | ||
341 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); | ||
342 | |||
343 | mutex_lock(&g_tf_lock); | ||
344 | list_add_tail(&tf->tf_list, &g_tf_list); | ||
345 | mutex_unlock(&g_tf_lock); | ||
346 | |||
347 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" | ||
348 | ">>>>>>>>>>>>>>\n"); | ||
349 | pr_debug("Initialized struct target_fabric_configfs: %p for" | ||
350 | " %s\n", tf, tf->tf_name); | ||
351 | return tf; | ||
352 | } | ||
353 | EXPORT_SYMBOL(target_fabric_configfs_init); | ||
354 | |||
355 | /* | ||
356 | * Called by fabric plugins after FAILED target_fabric_configfs_register() call. | ||
357 | */ | ||
358 | void target_fabric_configfs_free( | ||
359 | struct target_fabric_configfs *tf) | ||
360 | { | ||
361 | mutex_lock(&g_tf_lock); | ||
362 | list_del(&tf->tf_list); | ||
363 | mutex_unlock(&g_tf_lock); | ||
364 | |||
365 | kfree(tf); | ||
366 | } | ||
367 | EXPORT_SYMBOL(target_fabric_configfs_free); | ||
368 | |||
369 | /* | ||
370 | * Perform a sanity check of the passed tf->tf_ops before completing | ||
371 | * TCM fabric module registration. | ||
372 | */ | ||
373 | static int target_fabric_tf_ops_check( | ||
374 | struct target_fabric_configfs *tf) | ||
375 | { | ||
376 | struct target_core_fabric_ops *tfo = &tf->tf_ops; | ||
377 | |||
378 | if (!tfo->get_fabric_name) { | 314 | if (!tfo->get_fabric_name) { |
379 | pr_err("Missing tfo->get_fabric_name()\n"); | 315 | pr_err("Missing tfo->get_fabric_name()\n"); |
380 | return -EINVAL; | 316 | return -EINVAL; |
@@ -508,77 +444,59 @@ static int target_fabric_tf_ops_check( | |||
508 | return 0; | 444 | return 0; |
509 | } | 445 | } |
510 | 446 | ||
511 | /* | 447 | int target_register_template(const struct target_core_fabric_ops *fo) |
512 | * Called 2nd from fabric module with returned parameter of | ||
513 | * struct target_fabric_configfs * from target_fabric_configfs_init(). | ||
514 | * | ||
515 | * Upon a successful registration, the new fabric's struct config_item is | ||
516 | * return. Also, a pointer to this struct is set in the passed | ||
517 | * struct target_fabric_configfs. | ||
518 | */ | ||
519 | int target_fabric_configfs_register( | ||
520 | struct target_fabric_configfs *tf) | ||
521 | { | 448 | { |
449 | struct target_fabric_configfs *tf; | ||
522 | int ret; | 450 | int ret; |
523 | 451 | ||
452 | ret = target_fabric_tf_ops_check(fo); | ||
453 | if (ret) | ||
454 | return ret; | ||
455 | |||
456 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | ||
524 | if (!tf) { | 457 | if (!tf) { |
525 | pr_err("Unable to locate target_fabric_configfs" | 458 | pr_err("%s: could not allocate memory!\n", __func__); |
526 | " pointer\n"); | 459 | return -ENOMEM; |
527 | return -EINVAL; | ||
528 | } | ||
529 | if (!tf->tf_subsys) { | ||
530 | pr_err("Unable to target struct config_subsystem" | ||
531 | " pointer\n"); | ||
532 | return -EINVAL; | ||
533 | } | 460 | } |
534 | ret = target_fabric_tf_ops_check(tf); | ||
535 | if (ret < 0) | ||
536 | return ret; | ||
537 | 461 | ||
538 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" | 462 | INIT_LIST_HEAD(&tf->tf_list); |
539 | ">>>>>>>>>>\n"); | 463 | atomic_set(&tf->tf_access_cnt, 0); |
464 | |||
465 | /* | ||
466 | * Setup the default generic struct config_item_type's (cits) in | ||
467 | * struct target_fabric_configfs->tf_cit_tmpl | ||
468 | */ | ||
469 | tf->tf_module = fo->module; | ||
470 | tf->tf_subsys = target_core_subsystem[0]; | ||
471 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); | ||
472 | |||
473 | tf->tf_ops = *fo; | ||
474 | target_fabric_setup_cits(tf); | ||
475 | |||
476 | mutex_lock(&g_tf_lock); | ||
477 | list_add_tail(&tf->tf_list, &g_tf_list); | ||
478 | mutex_unlock(&g_tf_lock); | ||
479 | |||
540 | return 0; | 480 | return 0; |
541 | } | 481 | } |
542 | EXPORT_SYMBOL(target_fabric_configfs_register); | 482 | EXPORT_SYMBOL(target_register_template); |
543 | 483 | ||
544 | void target_fabric_configfs_deregister( | 484 | void target_unregister_template(const struct target_core_fabric_ops *fo) |
545 | struct target_fabric_configfs *tf) | ||
546 | { | 485 | { |
547 | struct configfs_subsystem *su; | 486 | struct target_fabric_configfs *t; |
548 | 487 | ||
549 | if (!tf) { | ||
550 | pr_err("Unable to locate passed target_fabric_" | ||
551 | "configfs\n"); | ||
552 | return; | ||
553 | } | ||
554 | su = tf->tf_subsys; | ||
555 | if (!su) { | ||
556 | pr_err("Unable to locate passed tf->tf_subsys" | ||
557 | " pointer\n"); | ||
558 | return; | ||
559 | } | ||
560 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" | ||
561 | ">>>>>>>>>>>>\n"); | ||
562 | mutex_lock(&g_tf_lock); | 488 | mutex_lock(&g_tf_lock); |
563 | if (atomic_read(&tf->tf_access_cnt)) { | 489 | list_for_each_entry(t, &g_tf_list, tf_list) { |
564 | mutex_unlock(&g_tf_lock); | 490 | if (!strcmp(t->tf_name, fo->name)) { |
565 | pr_err("Non zero tf->tf_access_cnt for fabric %s\n", | 491 | BUG_ON(atomic_read(&t->tf_access_cnt)); |
566 | tf->tf_name); | 492 | list_del(&t->tf_list); |
567 | BUG(); | 493 | kfree(t); |
494 | break; | ||
495 | } | ||
568 | } | 496 | } |
569 | list_del(&tf->tf_list); | ||
570 | mutex_unlock(&g_tf_lock); | 497 | mutex_unlock(&g_tf_lock); |
571 | |||
572 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" | ||
573 | " %s\n", tf->tf_name); | ||
574 | tf->tf_module = NULL; | ||
575 | tf->tf_subsys = NULL; | ||
576 | kfree(tf); | ||
577 | |||
578 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" | ||
579 | ">>>>>\n"); | ||
580 | } | 498 | } |
581 | EXPORT_SYMBOL(target_fabric_configfs_deregister); | 499 | EXPORT_SYMBOL(target_unregister_template); |
582 | 500 | ||
583 | /*############################################################################## | 501 | /*############################################################################## |
584 | // Stop functions called by external Target Fabrics Modules | 502 | // Stop functions called by external Target Fabrics Modules |
@@ -945,7 +863,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
945 | struct se_lun *lun; | 863 | struct se_lun *lun; |
946 | struct se_portal_group *se_tpg; | 864 | struct se_portal_group *se_tpg; |
947 | struct t10_pr_registration *pr_reg; | 865 | struct t10_pr_registration *pr_reg; |
948 | struct target_core_fabric_ops *tfo; | 866 | const struct target_core_fabric_ops *tfo; |
949 | ssize_t len = 0; | 867 | ssize_t len = 0; |
950 | 868 | ||
951 | spin_lock(&dev->dev_reservation_lock); | 869 | spin_lock(&dev->dev_reservation_lock); |
@@ -979,7 +897,7 @@ SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); | |||
979 | static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | 897 | static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( |
980 | struct se_device *dev, char *page) | 898 | struct se_device *dev, char *page) |
981 | { | 899 | { |
982 | struct target_core_fabric_ops *tfo; | 900 | const struct target_core_fabric_ops *tfo; |
983 | struct t10_pr_registration *pr_reg; | 901 | struct t10_pr_registration *pr_reg; |
984 | unsigned char buf[384]; | 902 | unsigned char buf[384]; |
985 | char i_buf[PR_REG_ISID_ID_LEN]; | 903 | char i_buf[PR_REG_ISID_ID_LEN]; |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0c3f90130b7d..1f7886bb16bf 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -56,6 +56,20 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) | |||
56 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | 56 | pr_debug("Setup generic %s\n", __stringify(_name)); \ |
57 | } | 57 | } |
58 | 58 | ||
59 | #define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ | ||
60 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | ||
61 | { \ | ||
62 | struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ | ||
63 | struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ | ||
64 | struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \ | ||
65 | \ | ||
66 | cit->ct_item_ops = _item_ops; \ | ||
67 | cit->ct_group_ops = _group_ops; \ | ||
68 | cit->ct_attrs = attrs; \ | ||
69 | cit->ct_owner = tf->tf_module; \ | ||
70 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | ||
71 | } | ||
72 | |||
59 | /* Start of tfc_tpg_mappedlun_cit */ | 73 | /* Start of tfc_tpg_mappedlun_cit */ |
60 | 74 | ||
61 | static int target_fabric_mappedlun_link( | 75 | static int target_fabric_mappedlun_link( |
@@ -278,7 +292,7 @@ static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = { | |||
278 | .store_attribute = target_fabric_nacl_attrib_attr_store, | 292 | .store_attribute = target_fabric_nacl_attrib_attr_store, |
279 | }; | 293 | }; |
280 | 294 | ||
281 | TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); | 295 | TF_CIT_SETUP_DRV(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL); |
282 | 296 | ||
283 | /* End of tfc_tpg_nacl_attrib_cit */ | 297 | /* End of tfc_tpg_nacl_attrib_cit */ |
284 | 298 | ||
@@ -291,7 +305,7 @@ static struct configfs_item_operations target_fabric_nacl_auth_item_ops = { | |||
291 | .store_attribute = target_fabric_nacl_auth_attr_store, | 305 | .store_attribute = target_fabric_nacl_auth_attr_store, |
292 | }; | 306 | }; |
293 | 307 | ||
294 | TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); | 308 | TF_CIT_SETUP_DRV(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL); |
295 | 309 | ||
296 | /* End of tfc_tpg_nacl_auth_cit */ | 310 | /* End of tfc_tpg_nacl_auth_cit */ |
297 | 311 | ||
@@ -304,7 +318,7 @@ static struct configfs_item_operations target_fabric_nacl_param_item_ops = { | |||
304 | .store_attribute = target_fabric_nacl_param_attr_store, | 318 | .store_attribute = target_fabric_nacl_param_attr_store, |
305 | }; | 319 | }; |
306 | 320 | ||
307 | TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); | 321 | TF_CIT_SETUP_DRV(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL); |
308 | 322 | ||
309 | /* End of tfc_tpg_nacl_param_cit */ | 323 | /* End of tfc_tpg_nacl_param_cit */ |
310 | 324 | ||
@@ -461,8 +475,8 @@ static struct configfs_group_operations target_fabric_nacl_base_group_ops = { | |||
461 | .drop_item = target_fabric_drop_mappedlun, | 475 | .drop_item = target_fabric_drop_mappedlun, |
462 | }; | 476 | }; |
463 | 477 | ||
464 | TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, | 478 | TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops, |
465 | &target_fabric_nacl_base_group_ops, NULL); | 479 | &target_fabric_nacl_base_group_ops); |
466 | 480 | ||
467 | /* End of tfc_tpg_nacl_base_cit */ | 481 | /* End of tfc_tpg_nacl_base_cit */ |
468 | 482 | ||
@@ -570,7 +584,7 @@ static struct configfs_item_operations target_fabric_np_base_item_ops = { | |||
570 | .store_attribute = target_fabric_np_base_attr_store, | 584 | .store_attribute = target_fabric_np_base_attr_store, |
571 | }; | 585 | }; |
572 | 586 | ||
573 | TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); | 587 | TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL); |
574 | 588 | ||
575 | /* End of tfc_tpg_np_base_cit */ | 589 | /* End of tfc_tpg_np_base_cit */ |
576 | 590 | ||
@@ -966,7 +980,7 @@ static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = { | |||
966 | .store_attribute = target_fabric_tpg_attrib_attr_store, | 980 | .store_attribute = target_fabric_tpg_attrib_attr_store, |
967 | }; | 981 | }; |
968 | 982 | ||
969 | TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); | 983 | TF_CIT_SETUP_DRV(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL); |
970 | 984 | ||
971 | /* End of tfc_tpg_attrib_cit */ | 985 | /* End of tfc_tpg_attrib_cit */ |
972 | 986 | ||
@@ -979,7 +993,7 @@ static struct configfs_item_operations target_fabric_tpg_auth_item_ops = { | |||
979 | .store_attribute = target_fabric_tpg_auth_attr_store, | 993 | .store_attribute = target_fabric_tpg_auth_attr_store, |
980 | }; | 994 | }; |
981 | 995 | ||
982 | TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL); | 996 | TF_CIT_SETUP_DRV(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL); |
983 | 997 | ||
984 | /* End of tfc_tpg_attrib_cit */ | 998 | /* End of tfc_tpg_attrib_cit */ |
985 | 999 | ||
@@ -992,7 +1006,7 @@ static struct configfs_item_operations target_fabric_tpg_param_item_ops = { | |||
992 | .store_attribute = target_fabric_tpg_param_attr_store, | 1006 | .store_attribute = target_fabric_tpg_param_attr_store, |
993 | }; | 1007 | }; |
994 | 1008 | ||
995 | TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); | 1009 | TF_CIT_SETUP_DRV(tpg_param, &target_fabric_tpg_param_item_ops, NULL); |
996 | 1010 | ||
997 | /* End of tfc_tpg_param_cit */ | 1011 | /* End of tfc_tpg_param_cit */ |
998 | 1012 | ||
@@ -1018,7 +1032,7 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = { | |||
1018 | .store_attribute = target_fabric_tpg_attr_store, | 1032 | .store_attribute = target_fabric_tpg_attr_store, |
1019 | }; | 1033 | }; |
1020 | 1034 | ||
1021 | TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); | 1035 | TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL); |
1022 | 1036 | ||
1023 | /* End of tfc_tpg_base_cit */ | 1037 | /* End of tfc_tpg_base_cit */ |
1024 | 1038 | ||
@@ -1192,7 +1206,7 @@ static struct configfs_item_operations target_fabric_wwn_item_ops = { | |||
1192 | .store_attribute = target_fabric_wwn_attr_store, | 1206 | .store_attribute = target_fabric_wwn_attr_store, |
1193 | }; | 1207 | }; |
1194 | 1208 | ||
1195 | TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); | 1209 | TF_CIT_SETUP_DRV(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops); |
1196 | 1210 | ||
1197 | /* End of tfc_wwn_cit */ | 1211 | /* End of tfc_wwn_cit */ |
1198 | 1212 | ||
@@ -1206,7 +1220,7 @@ static struct configfs_item_operations target_fabric_discovery_item_ops = { | |||
1206 | .store_attribute = target_fabric_discovery_attr_store, | 1220 | .store_attribute = target_fabric_discovery_attr_store, |
1207 | }; | 1221 | }; |
1208 | 1222 | ||
1209 | TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); | 1223 | TF_CIT_SETUP_DRV(discovery, &target_fabric_discovery_item_ops, NULL); |
1210 | 1224 | ||
1211 | /* End of tfc_discovery_cit */ | 1225 | /* End of tfc_discovery_cit */ |
1212 | 1226 | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 44620fb6bd45..f7e6e51aed36 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, | |||
264 | struct se_device *se_dev = cmd->se_dev; | 264 | struct se_device *se_dev = cmd->se_dev; |
265 | struct fd_dev *dev = FD_DEV(se_dev); | 265 | struct fd_dev *dev = FD_DEV(se_dev); |
266 | struct file *prot_fd = dev->fd_prot_file; | 266 | struct file *prot_fd = dev->fd_prot_file; |
267 | struct scatterlist *sg; | ||
268 | loff_t pos = (cmd->t_task_lba * se_dev->prot_length); | 267 | loff_t pos = (cmd->t_task_lba * se_dev->prot_length); |
269 | unsigned char *buf; | 268 | unsigned char *buf; |
270 | u32 prot_size, len, size; | 269 | u32 prot_size; |
271 | int rc, ret = 1, i; | 270 | int rc, ret = 1; |
272 | 271 | ||
273 | prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * | 272 | prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * |
274 | se_dev->prot_length; | 273 | se_dev->prot_length; |
275 | 274 | ||
276 | if (!is_write) { | 275 | if (!is_write) { |
277 | fd_prot->prot_buf = vzalloc(prot_size); | 276 | fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL); |
278 | if (!fd_prot->prot_buf) { | 277 | if (!fd_prot->prot_buf) { |
279 | pr_err("Unable to allocate fd_prot->prot_buf\n"); | 278 | pr_err("Unable to allocate fd_prot->prot_buf\n"); |
280 | return -ENOMEM; | 279 | return -ENOMEM; |
281 | } | 280 | } |
282 | buf = fd_prot->prot_buf; | 281 | buf = fd_prot->prot_buf; |
283 | 282 | ||
284 | fd_prot->prot_sg_nents = cmd->t_prot_nents; | 283 | fd_prot->prot_sg_nents = 1; |
285 | fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * | 284 | fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist), |
286 | fd_prot->prot_sg_nents, GFP_KERNEL); | 285 | GFP_KERNEL); |
287 | if (!fd_prot->prot_sg) { | 286 | if (!fd_prot->prot_sg) { |
288 | pr_err("Unable to allocate fd_prot->prot_sg\n"); | 287 | pr_err("Unable to allocate fd_prot->prot_sg\n"); |
289 | vfree(fd_prot->prot_buf); | 288 | kfree(fd_prot->prot_buf); |
290 | return -ENOMEM; | 289 | return -ENOMEM; |
291 | } | 290 | } |
292 | size = prot_size; | 291 | sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents); |
293 | 292 | sg_set_buf(fd_prot->prot_sg, buf, prot_size); | |
294 | for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { | ||
295 | |||
296 | len = min_t(u32, PAGE_SIZE, size); | ||
297 | sg_set_buf(sg, buf, len); | ||
298 | size -= len; | ||
299 | buf += len; | ||
300 | } | ||
301 | } | 293 | } |
302 | 294 | ||
303 | if (is_write) { | 295 | if (is_write) { |
@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, | |||
318 | 310 | ||
319 | if (is_write || ret < 0) { | 311 | if (is_write || ret < 0) { |
320 | kfree(fd_prot->prot_sg); | 312 | kfree(fd_prot->prot_sg); |
321 | vfree(fd_prot->prot_buf); | 313 | kfree(fd_prot->prot_buf); |
322 | } | 314 | } |
323 | 315 | ||
324 | return ret; | 316 | return ret; |
@@ -331,36 +323,33 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, | |||
331 | struct fd_dev *dev = FD_DEV(se_dev); | 323 | struct fd_dev *dev = FD_DEV(se_dev); |
332 | struct file *fd = dev->fd_file; | 324 | struct file *fd = dev->fd_file; |
333 | struct scatterlist *sg; | 325 | struct scatterlist *sg; |
334 | struct iovec *iov; | 326 | struct iov_iter iter; |
335 | mm_segment_t old_fs; | 327 | struct bio_vec *bvec; |
328 | ssize_t len = 0; | ||
336 | loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); | 329 | loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); |
337 | int ret = 0, i; | 330 | int ret = 0, i; |
338 | 331 | ||
339 | iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); | 332 | bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); |
340 | if (!iov) { | 333 | if (!bvec) { |
341 | pr_err("Unable to allocate fd_do_readv iov[]\n"); | 334 | pr_err("Unable to allocate fd_do_readv iov[]\n"); |
342 | return -ENOMEM; | 335 | return -ENOMEM; |
343 | } | 336 | } |
344 | 337 | ||
345 | for_each_sg(sgl, sg, sgl_nents, i) { | 338 | for_each_sg(sgl, sg, sgl_nents, i) { |
346 | iov[i].iov_len = sg->length; | 339 | bvec[i].bv_page = sg_page(sg); |
347 | iov[i].iov_base = kmap(sg_page(sg)) + sg->offset; | 340 | bvec[i].bv_len = sg->length; |
348 | } | 341 | bvec[i].bv_offset = sg->offset; |
349 | 342 | ||
350 | old_fs = get_fs(); | 343 | len += sg->length; |
351 | set_fs(get_ds()); | 344 | } |
352 | 345 | ||
346 | iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len); | ||
353 | if (is_write) | 347 | if (is_write) |
354 | ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); | 348 | ret = vfs_iter_write(fd, &iter, &pos); |
355 | else | 349 | else |
356 | ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); | 350 | ret = vfs_iter_read(fd, &iter, &pos); |
357 | |||
358 | set_fs(old_fs); | ||
359 | |||
360 | for_each_sg(sgl, sg, sgl_nents, i) | ||
361 | kunmap(sg_page(sg)); | ||
362 | 351 | ||
363 | kfree(iov); | 352 | kfree(bvec); |
364 | 353 | ||
365 | if (is_write) { | 354 | if (is_write) { |
366 | if (ret < 0 || ret != cmd->data_length) { | 355 | if (ret < 0 || ret != cmd->data_length) { |
@@ -436,59 +425,17 @@ fd_execute_sync_cache(struct se_cmd *cmd) | |||
436 | return 0; | 425 | return 0; |
437 | } | 426 | } |
438 | 427 | ||
439 | static unsigned char * | ||
440 | fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg, | ||
441 | unsigned int len) | ||
442 | { | ||
443 | struct se_device *se_dev = cmd->se_dev; | ||
444 | unsigned int block_size = se_dev->dev_attrib.block_size; | ||
445 | unsigned int i = 0, end; | ||
446 | unsigned char *buf, *p, *kmap_buf; | ||
447 | |||
448 | buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL); | ||
449 | if (!buf) { | ||
450 | pr_err("Unable to allocate fd_execute_write_same buf\n"); | ||
451 | return NULL; | ||
452 | } | ||
453 | |||
454 | kmap_buf = kmap(sg_page(sg)) + sg->offset; | ||
455 | if (!kmap_buf) { | ||
456 | pr_err("kmap() failed in fd_setup_write_same\n"); | ||
457 | kfree(buf); | ||
458 | return NULL; | ||
459 | } | ||
460 | /* | ||
461 | * Fill local *buf to contain multiple WRITE_SAME blocks up to | ||
462 | * min(len, PAGE_SIZE) | ||
463 | */ | ||
464 | p = buf; | ||
465 | end = min_t(unsigned int, len, PAGE_SIZE); | ||
466 | |||
467 | while (i < end) { | ||
468 | memcpy(p, kmap_buf, block_size); | ||
469 | |||
470 | i += block_size; | ||
471 | p += block_size; | ||
472 | } | ||
473 | kunmap(sg_page(sg)); | ||
474 | |||
475 | return buf; | ||
476 | } | ||
477 | |||
478 | static sense_reason_t | 428 | static sense_reason_t |
479 | fd_execute_write_same(struct se_cmd *cmd) | 429 | fd_execute_write_same(struct se_cmd *cmd) |
480 | { | 430 | { |
481 | struct se_device *se_dev = cmd->se_dev; | 431 | struct se_device *se_dev = cmd->se_dev; |
482 | struct fd_dev *fd_dev = FD_DEV(se_dev); | 432 | struct fd_dev *fd_dev = FD_DEV(se_dev); |
483 | struct file *f = fd_dev->fd_file; | ||
484 | struct scatterlist *sg; | ||
485 | struct iovec *iov; | ||
486 | mm_segment_t old_fs; | ||
487 | sector_t nolb = sbc_get_write_same_sectors(cmd); | ||
488 | loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; | 433 | loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; |
489 | unsigned int len, len_tmp, iov_num; | 434 | sector_t nolb = sbc_get_write_same_sectors(cmd); |
490 | int i, rc; | 435 | struct iov_iter iter; |
491 | unsigned char *buf; | 436 | struct bio_vec *bvec; |
437 | unsigned int len = 0, i; | ||
438 | ssize_t ret; | ||
492 | 439 | ||
493 | if (!nolb) { | 440 | if (!nolb) { |
494 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 441 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
@@ -499,56 +446,92 @@ fd_execute_write_same(struct se_cmd *cmd) | |||
499 | " backends not supported\n"); | 446 | " backends not supported\n"); |
500 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 447 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
501 | } | 448 | } |
502 | sg = &cmd->t_data_sg[0]; | ||
503 | 449 | ||
504 | if (cmd->t_data_nents > 1 || | 450 | if (cmd->t_data_nents > 1 || |
505 | sg->length != cmd->se_dev->dev_attrib.block_size) { | 451 | cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) { |
506 | pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" | 452 | pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" |
507 | " block_size: %u\n", cmd->t_data_nents, sg->length, | 453 | " block_size: %u\n", |
454 | cmd->t_data_nents, | ||
455 | cmd->t_data_sg[0].length, | ||
508 | cmd->se_dev->dev_attrib.block_size); | 456 | cmd->se_dev->dev_attrib.block_size); |
509 | return TCM_INVALID_CDB_FIELD; | 457 | return TCM_INVALID_CDB_FIELD; |
510 | } | 458 | } |
511 | 459 | ||
512 | len = len_tmp = nolb * se_dev->dev_attrib.block_size; | 460 | bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL); |
513 | iov_num = DIV_ROUND_UP(len, PAGE_SIZE); | 461 | if (!bvec) |
514 | |||
515 | buf = fd_setup_write_same_buf(cmd, sg, len); | ||
516 | if (!buf) | ||
517 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 462 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
518 | 463 | ||
519 | iov = vzalloc(sizeof(struct iovec) * iov_num); | 464 | for (i = 0; i < nolb; i++) { |
520 | if (!iov) { | 465 | bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]); |
521 | pr_err("Unable to allocate fd_execute_write_same iovecs\n"); | 466 | bvec[i].bv_len = cmd->t_data_sg[0].length; |
522 | kfree(buf); | 467 | bvec[i].bv_offset = cmd->t_data_sg[0].offset; |
468 | |||
469 | len += se_dev->dev_attrib.block_size; | ||
470 | } | ||
471 | |||
472 | iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len); | ||
473 | ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos); | ||
474 | |||
475 | kfree(bvec); | ||
476 | if (ret < 0 || ret != len) { | ||
477 | pr_err("vfs_iter_write() returned %zd for write same\n", ret); | ||
523 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 478 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
524 | } | 479 | } |
525 | /* | 480 | |
526 | * Map the single fabric received scatterlist block now populated | 481 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
527 | * in *buf into each iovec for I/O submission. | 482 | return 0; |
528 | */ | 483 | } |
529 | for (i = 0; i < iov_num; i++) { | 484 | |
530 | iov[i].iov_base = buf; | 485 | static int |
531 | iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE); | 486 | fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, |
532 | len_tmp -= iov[i].iov_len; | 487 | void *buf, size_t bufsize) |
488 | { | ||
489 | struct fd_dev *fd_dev = FD_DEV(se_dev); | ||
490 | struct file *prot_fd = fd_dev->fd_prot_file; | ||
491 | sector_t prot_length, prot; | ||
492 | loff_t pos = lba * se_dev->prot_length; | ||
493 | |||
494 | if (!prot_fd) { | ||
495 | pr_err("Unable to locate fd_dev->fd_prot_file\n"); | ||
496 | return -ENODEV; | ||
533 | } | 497 | } |
534 | 498 | ||
535 | old_fs = get_fs(); | 499 | prot_length = nolb * se_dev->prot_length; |
536 | set_fs(get_ds()); | ||
537 | rc = vfs_writev(f, &iov[0], iov_num, &pos); | ||
538 | set_fs(old_fs); | ||
539 | 500 | ||
540 | vfree(iov); | 501 | for (prot = 0; prot < prot_length;) { |
541 | kfree(buf); | 502 | sector_t len = min_t(sector_t, bufsize, prot_length - prot); |
503 | ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); | ||
542 | 504 | ||
543 | if (rc < 0 || rc != len) { | 505 | if (ret != len) { |
544 | pr_err("vfs_writev() returned %d for write same\n", rc); | 506 | pr_err("vfs_write to prot file failed: %zd\n", ret); |
545 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 507 | return ret < 0 ? ret : -ENODEV; |
508 | } | ||
509 | prot += ret; | ||
546 | } | 510 | } |
547 | 511 | ||
548 | target_complete_cmd(cmd, SAM_STAT_GOOD); | ||
549 | return 0; | 512 | return 0; |
550 | } | 513 | } |
551 | 514 | ||
515 | static int | ||
516 | fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) | ||
517 | { | ||
518 | void *buf; | ||
519 | int rc; | ||
520 | |||
521 | buf = (void *)__get_free_page(GFP_KERNEL); | ||
522 | if (!buf) { | ||
523 | pr_err("Unable to allocate FILEIO prot buf\n"); | ||
524 | return -ENOMEM; | ||
525 | } | ||
526 | memset(buf, 0xff, PAGE_SIZE); | ||
527 | |||
528 | rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); | ||
529 | |||
530 | free_page((unsigned long)buf); | ||
531 | |||
532 | return rc; | ||
533 | } | ||
534 | |||
552 | static sense_reason_t | 535 | static sense_reason_t |
553 | fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) | 536 | fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) |
554 | { | 537 | { |
@@ -556,6 +539,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) | |||
556 | struct inode *inode = file->f_mapping->host; | 539 | struct inode *inode = file->f_mapping->host; |
557 | int ret; | 540 | int ret; |
558 | 541 | ||
542 | if (cmd->se_dev->dev_attrib.pi_prot_type) { | ||
543 | ret = fd_do_prot_unmap(cmd, lba, nolb); | ||
544 | if (ret) | ||
545 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
546 | } | ||
547 | |||
559 | if (S_ISBLK(inode->i_mode)) { | 548 | if (S_ISBLK(inode->i_mode)) { |
560 | /* The backend is block device, use discard */ | 549 | /* The backend is block device, use discard */ |
561 | struct block_device *bdev = inode->i_bdev; | 550 | struct block_device *bdev = inode->i_bdev; |
@@ -595,7 +584,7 @@ fd_execute_write_same_unmap(struct se_cmd *cmd) | |||
595 | struct file *file = fd_dev->fd_file; | 584 | struct file *file = fd_dev->fd_file; |
596 | sector_t lba = cmd->t_task_lba; | 585 | sector_t lba = cmd->t_task_lba; |
597 | sector_t nolb = sbc_get_write_same_sectors(cmd); | 586 | sector_t nolb = sbc_get_write_same_sectors(cmd); |
598 | int ret; | 587 | sense_reason_t ret; |
599 | 588 | ||
600 | if (!nolb) { | 589 | if (!nolb) { |
601 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 590 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
@@ -643,7 +632,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
643 | if (data_direction == DMA_FROM_DEVICE) { | 632 | if (data_direction == DMA_FROM_DEVICE) { |
644 | memset(&fd_prot, 0, sizeof(struct fd_prot)); | 633 | memset(&fd_prot, 0, sizeof(struct fd_prot)); |
645 | 634 | ||
646 | if (cmd->prot_type) { | 635 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
647 | ret = fd_do_prot_rw(cmd, &fd_prot, false); | 636 | ret = fd_do_prot_rw(cmd, &fd_prot, false); |
648 | if (ret < 0) | 637 | if (ret < 0) |
649 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 638 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -651,23 +640,23 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
651 | 640 | ||
652 | ret = fd_do_rw(cmd, sgl, sgl_nents, 0); | 641 | ret = fd_do_rw(cmd, sgl, sgl_nents, 0); |
653 | 642 | ||
654 | if (ret > 0 && cmd->prot_type) { | 643 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
655 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; | 644 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; |
656 | 645 | ||
657 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, | 646 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, |
658 | 0, fd_prot.prot_sg, 0); | 647 | 0, fd_prot.prot_sg, 0); |
659 | if (rc) { | 648 | if (rc) { |
660 | kfree(fd_prot.prot_sg); | 649 | kfree(fd_prot.prot_sg); |
661 | vfree(fd_prot.prot_buf); | 650 | kfree(fd_prot.prot_buf); |
662 | return rc; | 651 | return rc; |
663 | } | 652 | } |
664 | kfree(fd_prot.prot_sg); | 653 | kfree(fd_prot.prot_sg); |
665 | vfree(fd_prot.prot_buf); | 654 | kfree(fd_prot.prot_buf); |
666 | } | 655 | } |
667 | } else { | 656 | } else { |
668 | memset(&fd_prot, 0, sizeof(struct fd_prot)); | 657 | memset(&fd_prot, 0, sizeof(struct fd_prot)); |
669 | 658 | ||
670 | if (cmd->prot_type) { | 659 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
671 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; | 660 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; |
672 | 661 | ||
673 | ret = fd_do_prot_rw(cmd, &fd_prot, false); | 662 | ret = fd_do_prot_rw(cmd, &fd_prot, false); |
@@ -678,7 +667,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
678 | 0, fd_prot.prot_sg, 0); | 667 | 0, fd_prot.prot_sg, 0); |
679 | if (rc) { | 668 | if (rc) { |
680 | kfree(fd_prot.prot_sg); | 669 | kfree(fd_prot.prot_sg); |
681 | vfree(fd_prot.prot_buf); | 670 | kfree(fd_prot.prot_buf); |
682 | return rc; | 671 | return rc; |
683 | } | 672 | } |
684 | } | 673 | } |
@@ -705,7 +694,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
705 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); | 694 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); |
706 | } | 695 | } |
707 | 696 | ||
708 | if (ret > 0 && cmd->prot_type) { | 697 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
709 | ret = fd_do_prot_rw(cmd, &fd_prot, true); | 698 | ret = fd_do_prot_rw(cmd, &fd_prot, true); |
710 | if (ret < 0) | 699 | if (ret < 0) |
711 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 700 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -714,7 +703,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
714 | 703 | ||
715 | if (ret < 0) { | 704 | if (ret < 0) { |
716 | kfree(fd_prot.prot_sg); | 705 | kfree(fd_prot.prot_sg); |
717 | vfree(fd_prot.prot_buf); | 706 | kfree(fd_prot.prot_buf); |
718 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 707 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
719 | } | 708 | } |
720 | 709 | ||
@@ -878,48 +867,28 @@ static int fd_init_prot(struct se_device *dev) | |||
878 | 867 | ||
879 | static int fd_format_prot(struct se_device *dev) | 868 | static int fd_format_prot(struct se_device *dev) |
880 | { | 869 | { |
881 | struct fd_dev *fd_dev = FD_DEV(dev); | ||
882 | struct file *prot_fd = fd_dev->fd_prot_file; | ||
883 | sector_t prot_length, prot; | ||
884 | unsigned char *buf; | 870 | unsigned char *buf; |
885 | loff_t pos = 0; | ||
886 | int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; | 871 | int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; |
887 | int rc, ret = 0, size, len; | 872 | int ret; |
888 | 873 | ||
889 | if (!dev->dev_attrib.pi_prot_type) { | 874 | if (!dev->dev_attrib.pi_prot_type) { |
890 | pr_err("Unable to format_prot while pi_prot_type == 0\n"); | 875 | pr_err("Unable to format_prot while pi_prot_type == 0\n"); |
891 | return -ENODEV; | 876 | return -ENODEV; |
892 | } | 877 | } |
893 | if (!prot_fd) { | ||
894 | pr_err("Unable to locate fd_dev->fd_prot_file\n"); | ||
895 | return -ENODEV; | ||
896 | } | ||
897 | 878 | ||
898 | buf = vzalloc(unit_size); | 879 | buf = vzalloc(unit_size); |
899 | if (!buf) { | 880 | if (!buf) { |
900 | pr_err("Unable to allocate FILEIO prot buf\n"); | 881 | pr_err("Unable to allocate FILEIO prot buf\n"); |
901 | return -ENOMEM; | 882 | return -ENOMEM; |
902 | } | 883 | } |
903 | prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; | ||
904 | size = prot_length; | ||
905 | 884 | ||
906 | pr_debug("Using FILEIO prot_length: %llu\n", | 885 | pr_debug("Using FILEIO prot_length: %llu\n", |
907 | (unsigned long long)prot_length); | 886 | (unsigned long long)(dev->transport->get_blocks(dev) + 1) * |
887 | dev->prot_length); | ||
908 | 888 | ||
909 | memset(buf, 0xff, unit_size); | 889 | memset(buf, 0xff, unit_size); |
910 | for (prot = 0; prot < prot_length; prot += unit_size) { | 890 | ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, |
911 | len = min(unit_size, size); | 891 | buf, unit_size); |
912 | rc = kernel_write(prot_fd, buf, len, pos); | ||
913 | if (rc != len) { | ||
914 | pr_err("vfs_write to prot file failed: %d\n", rc); | ||
915 | ret = -ENODEV; | ||
916 | goto out; | ||
917 | } | ||
918 | pos += len; | ||
919 | size -= len; | ||
920 | } | ||
921 | |||
922 | out: | ||
923 | vfree(buf); | 892 | vfree(buf); |
924 | return ret; | 893 | return ret; |
925 | } | 894 | } |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d4a4b0fb444a..1b7947c2510f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -444,7 +444,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd) | |||
444 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; | 444 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; |
445 | sector_t lba = cmd->t_task_lba; | 445 | sector_t lba = cmd->t_task_lba; |
446 | sector_t nolb = sbc_get_write_same_sectors(cmd); | 446 | sector_t nolb = sbc_get_write_same_sectors(cmd); |
447 | int ret; | 447 | sense_reason_t ret; |
448 | 448 | ||
449 | ret = iblock_do_unmap(cmd, bdev, lba, nolb); | 449 | ret = iblock_do_unmap(cmd, bdev, lba, nolb); |
450 | if (ret) | 450 | if (ret) |
@@ -774,7 +774,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
774 | sg_num--; | 774 | sg_num--; |
775 | } | 775 | } |
776 | 776 | ||
777 | if (cmd->prot_type) { | 777 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
778 | int rc = iblock_alloc_bip(cmd, bio_start); | 778 | int rc = iblock_alloc_bip(cmd, bio_start); |
779 | if (rc) | 779 | if (rc) |
780 | goto fail_put_bios; | 780 | goto fail_put_bios; |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 60381db90026..874a9bc988d8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -4,7 +4,13 @@ | |||
4 | /* target_core_alua.c */ | 4 | /* target_core_alua.c */ |
5 | extern struct t10_alua_lu_gp *default_lu_gp; | 5 | extern struct t10_alua_lu_gp *default_lu_gp; |
6 | 6 | ||
7 | /* target_core_configfs.c */ | ||
8 | extern struct configfs_subsystem *target_core_subsystem[]; | ||
9 | |||
7 | /* target_core_device.c */ | 10 | /* target_core_device.c */ |
11 | extern struct mutex g_device_mutex; | ||
12 | extern struct list_head g_device_list; | ||
13 | |||
8 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); | 14 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); |
9 | int core_free_device_list_for_node(struct se_node_acl *, | 15 | int core_free_device_list_for_node(struct se_node_acl *, |
10 | struct se_portal_group *); | 16 | struct se_portal_group *); |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 2de6fb8cee8d..c1aa9655e96e 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -78,6 +78,22 @@ enum preempt_type { | |||
78 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, | 78 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, |
79 | struct t10_pr_registration *, int, int); | 79 | struct t10_pr_registration *, int, int); |
80 | 80 | ||
81 | static int is_reservation_holder( | ||
82 | struct t10_pr_registration *pr_res_holder, | ||
83 | struct t10_pr_registration *pr_reg) | ||
84 | { | ||
85 | int pr_res_type; | ||
86 | |||
87 | if (pr_res_holder) { | ||
88 | pr_res_type = pr_res_holder->pr_res_type; | ||
89 | |||
90 | return pr_res_holder == pr_reg || | ||
91 | pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG || | ||
92 | pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG; | ||
93 | } | ||
94 | return 0; | ||
95 | } | ||
96 | |||
81 | static sense_reason_t | 97 | static sense_reason_t |
82 | target_scsi2_reservation_check(struct se_cmd *cmd) | 98 | target_scsi2_reservation_check(struct se_cmd *cmd) |
83 | { | 99 | { |
@@ -664,7 +680,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
664 | struct se_dev_entry *deve_tmp; | 680 | struct se_dev_entry *deve_tmp; |
665 | struct se_node_acl *nacl_tmp; | 681 | struct se_node_acl *nacl_tmp; |
666 | struct se_port *port, *port_tmp; | 682 | struct se_port *port, *port_tmp; |
667 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 683 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
668 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; | 684 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; |
669 | int ret; | 685 | int ret; |
670 | /* | 686 | /* |
@@ -963,7 +979,7 @@ int core_scsi3_check_aptpl_registration( | |||
963 | } | 979 | } |
964 | 980 | ||
965 | static void __core_scsi3_dump_registration( | 981 | static void __core_scsi3_dump_registration( |
966 | struct target_core_fabric_ops *tfo, | 982 | const struct target_core_fabric_ops *tfo, |
967 | struct se_device *dev, | 983 | struct se_device *dev, |
968 | struct se_node_acl *nacl, | 984 | struct se_node_acl *nacl, |
969 | struct t10_pr_registration *pr_reg, | 985 | struct t10_pr_registration *pr_reg, |
@@ -1004,7 +1020,7 @@ static void __core_scsi3_add_registration( | |||
1004 | enum register_type register_type, | 1020 | enum register_type register_type, |
1005 | int register_move) | 1021 | int register_move) |
1006 | { | 1022 | { |
1007 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 1023 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
1008 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1024 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1009 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1025 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1010 | 1026 | ||
@@ -1220,8 +1236,10 @@ static void __core_scsi3_free_registration( | |||
1220 | struct t10_pr_registration *pr_reg, | 1236 | struct t10_pr_registration *pr_reg, |
1221 | struct list_head *preempt_and_abort_list, | 1237 | struct list_head *preempt_and_abort_list, |
1222 | int dec_holders) | 1238 | int dec_holders) |
1239 | __releases(&pr_tmpl->registration_lock) | ||
1240 | __acquires(&pr_tmpl->registration_lock) | ||
1223 | { | 1241 | { |
1224 | struct target_core_fabric_ops *tfo = | 1242 | const struct target_core_fabric_ops *tfo = |
1225 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | 1243 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; |
1226 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1244 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1227 | char i_buf[PR_REG_ISID_ID_LEN]; | 1245 | char i_buf[PR_REG_ISID_ID_LEN]; |
@@ -1445,7 +1463,7 @@ core_scsi3_decode_spec_i_port( | |||
1445 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1463 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1446 | LIST_HEAD(tid_dest_list); | 1464 | LIST_HEAD(tid_dest_list); |
1447 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | 1465 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; |
1448 | struct target_core_fabric_ops *tmp_tf_ops; | 1466 | const struct target_core_fabric_ops *tmp_tf_ops; |
1449 | unsigned char *buf; | 1467 | unsigned char *buf; |
1450 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | 1468 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; |
1451 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; | 1469 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; |
@@ -2287,7 +2305,6 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) | |||
2287 | spin_lock(&dev->dev_reservation_lock); | 2305 | spin_lock(&dev->dev_reservation_lock); |
2288 | pr_res_holder = dev->dev_pr_res_holder; | 2306 | pr_res_holder = dev->dev_pr_res_holder; |
2289 | if (pr_res_holder) { | 2307 | if (pr_res_holder) { |
2290 | int pr_res_type = pr_res_holder->pr_res_type; | ||
2291 | /* | 2308 | /* |
2292 | * From spc4r17 Section 5.7.9: Reserving: | 2309 | * From spc4r17 Section 5.7.9: Reserving: |
2293 | * | 2310 | * |
@@ -2298,9 +2315,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) | |||
2298 | * the logical unit, then the command shall be completed with | 2315 | * the logical unit, then the command shall be completed with |
2299 | * RESERVATION CONFLICT status. | 2316 | * RESERVATION CONFLICT status. |
2300 | */ | 2317 | */ |
2301 | if ((pr_res_holder != pr_reg) && | 2318 | if (!is_reservation_holder(pr_res_holder, pr_reg)) { |
2302 | (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && | ||
2303 | (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | ||
2304 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2319 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2305 | pr_err("SPC-3 PR: Attempted RESERVE from" | 2320 | pr_err("SPC-3 PR: Attempted RESERVE from" |
2306 | " [%s]: %s while reservation already held by" | 2321 | " [%s]: %s while reservation already held by" |
@@ -2409,7 +2424,7 @@ static void __core_scsi3_complete_pro_release( | |||
2409 | int explicit, | 2424 | int explicit, |
2410 | int unreg) | 2425 | int unreg) |
2411 | { | 2426 | { |
2412 | struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; | 2427 | const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; |
2413 | char i_buf[PR_REG_ISID_ID_LEN]; | 2428 | char i_buf[PR_REG_ISID_ID_LEN]; |
2414 | int pr_res_type = 0, pr_res_scope = 0; | 2429 | int pr_res_type = 0, pr_res_scope = 0; |
2415 | 2430 | ||
@@ -2477,7 +2492,6 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, | |||
2477 | struct se_lun *se_lun = cmd->se_lun; | 2492 | struct se_lun *se_lun = cmd->se_lun; |
2478 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; | 2493 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; |
2479 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 2494 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
2480 | int all_reg = 0; | ||
2481 | sense_reason_t ret = 0; | 2495 | sense_reason_t ret = 0; |
2482 | 2496 | ||
2483 | if (!se_sess || !se_lun) { | 2497 | if (!se_sess || !se_lun) { |
@@ -2514,13 +2528,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, | |||
2514 | spin_unlock(&dev->dev_reservation_lock); | 2528 | spin_unlock(&dev->dev_reservation_lock); |
2515 | goto out_put_pr_reg; | 2529 | goto out_put_pr_reg; |
2516 | } | 2530 | } |
2517 | if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
2518 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) | ||
2519 | all_reg = 1; | ||
2520 | 2531 | ||
2521 | if ((all_reg == 0) && (pr_res_holder != pr_reg)) { | 2532 | if (!is_reservation_holder(pr_res_holder, pr_reg)) { |
2522 | /* | 2533 | /* |
2523 | * Non 'All Registrants' PR Type cases.. | ||
2524 | * Release request from a registered I_T nexus that is not a | 2534 | * Release request from a registered I_T nexus that is not a |
2525 | * persistent reservation holder. return GOOD status. | 2535 | * persistent reservation holder. return GOOD status. |
2526 | */ | 2536 | */ |
@@ -2726,7 +2736,7 @@ static void __core_scsi3_complete_pro_preempt( | |||
2726 | enum preempt_type preempt_type) | 2736 | enum preempt_type preempt_type) |
2727 | { | 2737 | { |
2728 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; | 2738 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; |
2729 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 2739 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
2730 | char i_buf[PR_REG_ISID_ID_LEN]; | 2740 | char i_buf[PR_REG_ISID_ID_LEN]; |
2731 | 2741 | ||
2732 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 2742 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
@@ -3111,7 +3121,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3111 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | 3121 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; |
3112 | struct se_port *se_port; | 3122 | struct se_port *se_port; |
3113 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; | 3123 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; |
3114 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; | 3124 | const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; |
3115 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; | 3125 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; |
3116 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 3126 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
3117 | unsigned char *buf; | 3127 | unsigned char *buf; |
@@ -3375,7 +3385,7 @@ after_iport_check: | |||
3375 | * From spc4r17 section 5.7.8 Table 50 -- | 3385 | * From spc4r17 section 5.7.8 Table 50 -- |
3376 | * Register behaviors for a REGISTER AND MOVE service action | 3386 | * Register behaviors for a REGISTER AND MOVE service action |
3377 | */ | 3387 | */ |
3378 | if (pr_res_holder != pr_reg) { | 3388 | if (!is_reservation_holder(pr_res_holder, pr_reg)) { |
3379 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" | 3389 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" |
3380 | " Nexus is not reservation holder\n"); | 3390 | " Nexus is not reservation holder\n"); |
3381 | spin_unlock(&dev->dev_reservation_lock); | 3391 | spin_unlock(&dev->dev_reservation_lock); |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 98e83ac5661b..a263bf5fab8d 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -139,10 +139,22 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table * | |||
139 | unsigned char *p; | 139 | unsigned char *p; |
140 | 140 | ||
141 | while (total_sg_needed) { | 141 | while (total_sg_needed) { |
142 | unsigned int chain_entry = 0; | ||
143 | |||
142 | sg_per_table = (total_sg_needed > max_sg_per_table) ? | 144 | sg_per_table = (total_sg_needed > max_sg_per_table) ? |
143 | max_sg_per_table : total_sg_needed; | 145 | max_sg_per_table : total_sg_needed; |
144 | 146 | ||
145 | sg = kzalloc(sg_per_table * sizeof(struct scatterlist), | 147 | #ifdef CONFIG_ARCH_HAS_SG_CHAIN |
148 | |||
149 | /* | ||
150 | * Reserve extra element for chain entry | ||
151 | */ | ||
152 | if (sg_per_table < total_sg_needed) | ||
153 | chain_entry = 1; | ||
154 | |||
155 | #endif /* CONFIG_ARCH_HAS_SG_CHAIN */ | ||
156 | |||
157 | sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg), | ||
146 | GFP_KERNEL); | 158 | GFP_KERNEL); |
147 | if (!sg) { | 159 | if (!sg) { |
148 | pr_err("Unable to allocate scatterlist array" | 160 | pr_err("Unable to allocate scatterlist array" |
@@ -150,7 +162,16 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table * | |||
150 | return -ENOMEM; | 162 | return -ENOMEM; |
151 | } | 163 | } |
152 | 164 | ||
153 | sg_init_table(sg, sg_per_table); | 165 | sg_init_table(sg, sg_per_table + chain_entry); |
166 | |||
167 | #ifdef CONFIG_ARCH_HAS_SG_CHAIN | ||
168 | |||
169 | if (i > 0) { | ||
170 | sg_chain(sg_table[i - 1].sg_table, | ||
171 | max_sg_per_table + 1, sg); | ||
172 | } | ||
173 | |||
174 | #endif /* CONFIG_ARCH_HAS_SG_CHAIN */ | ||
154 | 175 | ||
155 | sg_table[i].sg_table = sg; | 176 | sg_table[i].sg_table = sg; |
156 | sg_table[i].rd_sg_count = sg_per_table; | 177 | sg_table[i].rd_sg_count = sg_per_table; |
@@ -382,6 +403,76 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page | |||
382 | return NULL; | 403 | return NULL; |
383 | } | 404 | } |
384 | 405 | ||
406 | typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int, | ||
407 | unsigned int, struct scatterlist *, int); | ||
408 | |||
409 | static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify) | ||
410 | { | ||
411 | struct se_device *se_dev = cmd->se_dev; | ||
412 | struct rd_dev *dev = RD_DEV(se_dev); | ||
413 | struct rd_dev_sg_table *prot_table; | ||
414 | bool need_to_release = false; | ||
415 | struct scatterlist *prot_sg; | ||
416 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | ||
417 | u32 prot_offset, prot_page; | ||
418 | u32 prot_npages __maybe_unused; | ||
419 | u64 tmp; | ||
420 | sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
421 | |||
422 | tmp = cmd->t_task_lba * se_dev->prot_length; | ||
423 | prot_offset = do_div(tmp, PAGE_SIZE); | ||
424 | prot_page = tmp; | ||
425 | |||
426 | prot_table = rd_get_prot_table(dev, prot_page); | ||
427 | if (!prot_table) | ||
428 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
429 | |||
430 | prot_sg = &prot_table->sg_table[prot_page - | ||
431 | prot_table->page_start_offset]; | ||
432 | |||
433 | #ifndef CONFIG_ARCH_HAS_SG_CHAIN | ||
434 | |||
435 | prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length, | ||
436 | PAGE_SIZE); | ||
437 | |||
438 | /* | ||
439 | * Allocate temporaly contiguous scatterlist entries if prot pages | ||
440 | * straddles multiple scatterlist tables. | ||
441 | */ | ||
442 | if (prot_table->page_end_offset < prot_page + prot_npages - 1) { | ||
443 | int i; | ||
444 | |||
445 | prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL); | ||
446 | if (!prot_sg) | ||
447 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
448 | |||
449 | need_to_release = true; | ||
450 | sg_init_table(prot_sg, prot_npages); | ||
451 | |||
452 | for (i = 0; i < prot_npages; i++) { | ||
453 | if (prot_page + i > prot_table->page_end_offset) { | ||
454 | prot_table = rd_get_prot_table(dev, | ||
455 | prot_page + i); | ||
456 | if (!prot_table) { | ||
457 | kfree(prot_sg); | ||
458 | return rc; | ||
459 | } | ||
460 | sg_unmark_end(&prot_sg[i - 1]); | ||
461 | } | ||
462 | prot_sg[i] = prot_table->sg_table[prot_page + i - | ||
463 | prot_table->page_start_offset]; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | #endif /* !CONFIG_ARCH_HAS_SG_CHAIN */ | ||
468 | |||
469 | rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset); | ||
470 | if (need_to_release) | ||
471 | kfree(prot_sg); | ||
472 | |||
473 | return rc; | ||
474 | } | ||
475 | |||
385 | static sense_reason_t | 476 | static sense_reason_t |
386 | rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | 477 | rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
387 | enum dma_data_direction data_direction) | 478 | enum dma_data_direction data_direction) |
@@ -419,24 +510,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
419 | data_direction == DMA_FROM_DEVICE ? "Read" : "Write", | 510 | data_direction == DMA_FROM_DEVICE ? "Read" : "Write", |
420 | cmd->t_task_lba, rd_size, rd_page, rd_offset); | 511 | cmd->t_task_lba, rd_size, rd_page, rd_offset); |
421 | 512 | ||
422 | if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { | 513 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && |
423 | struct rd_dev_sg_table *prot_table; | 514 | data_direction == DMA_TO_DEVICE) { |
424 | struct scatterlist *prot_sg; | 515 | rc = rd_do_prot_rw(cmd, sbc_dif_verify_write); |
425 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | ||
426 | u32 prot_offset, prot_page; | ||
427 | |||
428 | tmp = cmd->t_task_lba * se_dev->prot_length; | ||
429 | prot_offset = do_div(tmp, PAGE_SIZE); | ||
430 | prot_page = tmp; | ||
431 | |||
432 | prot_table = rd_get_prot_table(dev, prot_page); | ||
433 | if (!prot_table) | ||
434 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
435 | |||
436 | prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; | ||
437 | |||
438 | rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, | ||
439 | prot_sg, prot_offset); | ||
440 | if (rc) | 516 | if (rc) |
441 | return rc; | 517 | return rc; |
442 | } | 518 | } |
@@ -502,24 +578,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
502 | } | 578 | } |
503 | sg_miter_stop(&m); | 579 | sg_miter_stop(&m); |
504 | 580 | ||
505 | if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { | 581 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && |
506 | struct rd_dev_sg_table *prot_table; | 582 | data_direction == DMA_FROM_DEVICE) { |
507 | struct scatterlist *prot_sg; | 583 | rc = rd_do_prot_rw(cmd, sbc_dif_verify_read); |
508 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | ||
509 | u32 prot_offset, prot_page; | ||
510 | |||
511 | tmp = cmd->t_task_lba * se_dev->prot_length; | ||
512 | prot_offset = do_div(tmp, PAGE_SIZE); | ||
513 | prot_page = tmp; | ||
514 | |||
515 | prot_table = rd_get_prot_table(dev, prot_page); | ||
516 | if (!prot_table) | ||
517 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
518 | |||
519 | prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; | ||
520 | |||
521 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, | ||
522 | prot_sg, prot_offset); | ||
523 | if (rc) | 584 | if (rc) |
524 | return rc; | 585 | return rc; |
525 | } | 586 | } |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 3e7297411110..8855781ac653 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -93,6 +93,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) | |||
93 | { | 93 | { |
94 | struct se_device *dev = cmd->se_dev; | 94 | struct se_device *dev = cmd->se_dev; |
95 | struct se_session *sess = cmd->se_sess; | 95 | struct se_session *sess = cmd->se_sess; |
96 | int pi_prot_type = dev->dev_attrib.pi_prot_type; | ||
97 | |||
96 | unsigned char *rbuf; | 98 | unsigned char *rbuf; |
97 | unsigned char buf[32]; | 99 | unsigned char buf[32]; |
98 | unsigned long long blocks = dev->transport->get_blocks(dev); | 100 | unsigned long long blocks = dev->transport->get_blocks(dev); |
@@ -114,8 +116,15 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) | |||
114 | * Set P_TYPE and PROT_EN bits for DIF support | 116 | * Set P_TYPE and PROT_EN bits for DIF support |
115 | */ | 117 | */ |
116 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 118 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
117 | if (dev->dev_attrib.pi_prot_type) | 119 | /* |
118 | buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; | 120 | * Only override a device's pi_prot_type if no T10-PI is |
121 | * available, and sess_prot_type has been explicitly enabled. | ||
122 | */ | ||
123 | if (!pi_prot_type) | ||
124 | pi_prot_type = sess->sess_prot_type; | ||
125 | |||
126 | if (pi_prot_type) | ||
127 | buf[12] = (pi_prot_type - 1) << 1 | 0x1; | ||
119 | } | 128 | } |
120 | 129 | ||
121 | if (dev->transport->get_lbppbe) | 130 | if (dev->transport->get_lbppbe) |
@@ -312,7 +321,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o | |||
312 | return 0; | 321 | return 0; |
313 | } | 322 | } |
314 | 323 | ||
315 | static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) | 324 | static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) |
316 | { | 325 | { |
317 | unsigned char *buf, *addr; | 326 | unsigned char *buf, *addr; |
318 | struct scatterlist *sg; | 327 | struct scatterlist *sg; |
@@ -376,7 +385,7 @@ sbc_execute_rw(struct se_cmd *cmd) | |||
376 | cmd->data_direction); | 385 | cmd->data_direction); |
377 | } | 386 | } |
378 | 387 | ||
379 | static sense_reason_t compare_and_write_post(struct se_cmd *cmd) | 388 | static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) |
380 | { | 389 | { |
381 | struct se_device *dev = cmd->se_dev; | 390 | struct se_device *dev = cmd->se_dev; |
382 | 391 | ||
@@ -399,7 +408,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd) | |||
399 | return TCM_NO_SENSE; | 408 | return TCM_NO_SENSE; |
400 | } | 409 | } |
401 | 410 | ||
402 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) | 411 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) |
403 | { | 412 | { |
404 | struct se_device *dev = cmd->se_dev; | 413 | struct se_device *dev = cmd->se_dev; |
405 | struct scatterlist *write_sg = NULL, *sg; | 414 | struct scatterlist *write_sg = NULL, *sg; |
@@ -414,11 +423,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) | |||
414 | 423 | ||
415 | /* | 424 | /* |
416 | * Handle early failure in transport_generic_request_failure(), | 425 | * Handle early failure in transport_generic_request_failure(), |
417 | * which will not have taken ->caw_mutex yet.. | 426 | * which will not have taken ->caw_sem yet.. |
418 | */ | 427 | */ |
419 | if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) | 428 | if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) |
420 | return TCM_NO_SENSE; | 429 | return TCM_NO_SENSE; |
421 | /* | 430 | /* |
431 | * Handle special case for zero-length COMPARE_AND_WRITE | ||
432 | */ | ||
433 | if (!cmd->data_length) | ||
434 | goto out; | ||
435 | /* | ||
422 | * Immediately exit + release dev->caw_sem if command has already | 436 | * Immediately exit + release dev->caw_sem if command has already |
423 | * been failed with a non-zero SCSI status. | 437 | * been failed with a non-zero SCSI status. |
424 | */ | 438 | */ |
@@ -581,12 +595,13 @@ sbc_compare_and_write(struct se_cmd *cmd) | |||
581 | } | 595 | } |
582 | 596 | ||
583 | static int | 597 | static int |
584 | sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, | 598 | sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, |
585 | bool is_write, struct se_cmd *cmd) | 599 | bool is_write, struct se_cmd *cmd) |
586 | { | 600 | { |
587 | if (is_write) { | 601 | if (is_write) { |
588 | cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : | 602 | cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : |
589 | TARGET_PROT_DOUT_INSERT; | 603 | protect ? TARGET_PROT_DOUT_PASS : |
604 | TARGET_PROT_DOUT_INSERT; | ||
590 | switch (protect) { | 605 | switch (protect) { |
591 | case 0x0: | 606 | case 0x0: |
592 | case 0x3: | 607 | case 0x3: |
@@ -610,8 +625,9 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, | |||
610 | return -EINVAL; | 625 | return -EINVAL; |
611 | } | 626 | } |
612 | } else { | 627 | } else { |
613 | cmd->prot_op = protect ? TARGET_PROT_DIN_PASS : | 628 | cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : |
614 | TARGET_PROT_DIN_STRIP; | 629 | protect ? TARGET_PROT_DIN_PASS : |
630 | TARGET_PROT_DIN_STRIP; | ||
615 | switch (protect) { | 631 | switch (protect) { |
616 | case 0x0: | 632 | case 0x0: |
617 | case 0x1: | 633 | case 0x1: |
@@ -644,11 +660,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, | |||
644 | u32 sectors, bool is_write) | 660 | u32 sectors, bool is_write) |
645 | { | 661 | { |
646 | u8 protect = cdb[1] >> 5; | 662 | u8 protect = cdb[1] >> 5; |
663 | int sp_ops = cmd->se_sess->sup_prot_ops; | ||
664 | int pi_prot_type = dev->dev_attrib.pi_prot_type; | ||
665 | bool fabric_prot = false; | ||
647 | 666 | ||
648 | if (!cmd->t_prot_sg || !cmd->t_prot_nents) { | 667 | if (!cmd->t_prot_sg || !cmd->t_prot_nents) { |
649 | if (protect && !dev->dev_attrib.pi_prot_type) { | 668 | if (unlikely(protect && |
650 | pr_err("CDB contains protect bit, but device does not" | 669 | !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { |
651 | " advertise PROTECT=1 feature bit\n"); | 670 | pr_err("CDB contains protect bit, but device + fabric does" |
671 | " not advertise PROTECT=1 feature bit\n"); | ||
652 | return TCM_INVALID_CDB_FIELD; | 672 | return TCM_INVALID_CDB_FIELD; |
653 | } | 673 | } |
654 | if (cmd->prot_pto) | 674 | if (cmd->prot_pto) |
@@ -669,15 +689,32 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, | |||
669 | cmd->reftag_seed = cmd->t_task_lba; | 689 | cmd->reftag_seed = cmd->t_task_lba; |
670 | break; | 690 | break; |
671 | case TARGET_DIF_TYPE0_PROT: | 691 | case TARGET_DIF_TYPE0_PROT: |
692 | /* | ||
693 | * See if the fabric supports T10-PI, and the session has been | ||
694 | * configured to allow export PROTECT=1 feature bit with backend | ||
695 | * devices that don't support T10-PI. | ||
696 | */ | ||
697 | fabric_prot = is_write ? | ||
698 | !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : | ||
699 | !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); | ||
700 | |||
701 | if (fabric_prot && cmd->se_sess->sess_prot_type) { | ||
702 | pi_prot_type = cmd->se_sess->sess_prot_type; | ||
703 | break; | ||
704 | } | ||
705 | if (!protect) | ||
706 | return TCM_NO_SENSE; | ||
707 | /* Fallthrough */ | ||
672 | default: | 708 | default: |
673 | return TCM_NO_SENSE; | 709 | pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " |
710 | "PROTECT: 0x%02x\n", cdb[0], protect); | ||
711 | return TCM_INVALID_CDB_FIELD; | ||
674 | } | 712 | } |
675 | 713 | ||
676 | if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, | 714 | if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) |
677 | is_write, cmd)) | ||
678 | return TCM_INVALID_CDB_FIELD; | 715 | return TCM_INVALID_CDB_FIELD; |
679 | 716 | ||
680 | cmd->prot_type = dev->dev_attrib.pi_prot_type; | 717 | cmd->prot_type = pi_prot_type; |
681 | cmd->prot_length = dev->prot_length * sectors; | 718 | cmd->prot_length = dev->prot_length * sectors; |
682 | 719 | ||
683 | /** | 720 | /** |
@@ -1166,14 +1203,16 @@ sbc_dif_generate(struct se_cmd *cmd) | |||
1166 | sdt = paddr + offset; | 1203 | sdt = paddr + offset; |
1167 | sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, | 1204 | sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, |
1168 | dev->dev_attrib.block_size)); | 1205 | dev->dev_attrib.block_size)); |
1169 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) | 1206 | if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) |
1170 | sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); | 1207 | sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); |
1171 | sdt->app_tag = 0; | 1208 | sdt->app_tag = 0; |
1172 | 1209 | ||
1173 | pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" | 1210 | pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" |
1174 | " app_tag: 0x%04x ref_tag: %u\n", | 1211 | " app_tag: 0x%04x ref_tag: %u\n", |
1175 | (unsigned long long)sector, sdt->guard_tag, | 1212 | (cmd->data_direction == DMA_TO_DEVICE) ? |
1176 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); | 1213 | "WRITE" : "READ", (unsigned long long)sector, |
1214 | sdt->guard_tag, sdt->app_tag, | ||
1215 | be32_to_cpu(sdt->ref_tag)); | ||
1177 | 1216 | ||
1178 | sector++; | 1217 | sector++; |
1179 | offset += sizeof(struct se_dif_v1_tuple); | 1218 | offset += sizeof(struct se_dif_v1_tuple); |
@@ -1185,12 +1224,16 @@ sbc_dif_generate(struct se_cmd *cmd) | |||
1185 | } | 1224 | } |
1186 | 1225 | ||
1187 | static sense_reason_t | 1226 | static sense_reason_t |
1188 | sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, | 1227 | sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, |
1189 | const void *p, sector_t sector, unsigned int ei_lba) | 1228 | const void *p, sector_t sector, unsigned int ei_lba) |
1190 | { | 1229 | { |
1230 | struct se_device *dev = cmd->se_dev; | ||
1191 | int block_size = dev->dev_attrib.block_size; | 1231 | int block_size = dev->dev_attrib.block_size; |
1192 | __be16 csum; | 1232 | __be16 csum; |
1193 | 1233 | ||
1234 | if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) | ||
1235 | goto check_ref; | ||
1236 | |||
1194 | csum = cpu_to_be16(crc_t10dif(p, block_size)); | 1237 | csum = cpu_to_be16(crc_t10dif(p, block_size)); |
1195 | 1238 | ||
1196 | if (sdt->guard_tag != csum) { | 1239 | if (sdt->guard_tag != csum) { |
@@ -1200,7 +1243,11 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, | |||
1200 | return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; | 1243 | return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; |
1201 | } | 1244 | } |
1202 | 1245 | ||
1203 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && | 1246 | check_ref: |
1247 | if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) | ||
1248 | return 0; | ||
1249 | |||
1250 | if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && | ||
1204 | be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { | 1251 | be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { |
1205 | pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" | 1252 | pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" |
1206 | " sector MSB: 0x%08x\n", (unsigned long long)sector, | 1253 | " sector MSB: 0x%08x\n", (unsigned long long)sector, |
@@ -1208,7 +1255,7 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, | |||
1208 | return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; | 1255 | return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; |
1209 | } | 1256 | } |
1210 | 1257 | ||
1211 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && | 1258 | if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && |
1212 | be32_to_cpu(sdt->ref_tag) != ei_lba) { | 1259 | be32_to_cpu(sdt->ref_tag) != ei_lba) { |
1213 | pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" | 1260 | pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" |
1214 | " ei_lba: 0x%08x\n", (unsigned long long)sector, | 1261 | " ei_lba: 0x%08x\n", (unsigned long long)sector, |
@@ -1229,6 +1276,9 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1229 | unsigned int i, len, left; | 1276 | unsigned int i, len, left; |
1230 | unsigned int offset = sg_off; | 1277 | unsigned int offset = sg_off; |
1231 | 1278 | ||
1279 | if (!sg) | ||
1280 | return; | ||
1281 | |||
1232 | left = sectors * dev->prot_length; | 1282 | left = sectors * dev->prot_length; |
1233 | 1283 | ||
1234 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { | 1284 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { |
@@ -1292,7 +1342,7 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1292 | (unsigned long long)sector, sdt->guard_tag, | 1342 | (unsigned long long)sector, sdt->guard_tag, |
1293 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); | 1343 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); |
1294 | 1344 | ||
1295 | rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, | 1345 | rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, |
1296 | ei_lba); | 1346 | ei_lba); |
1297 | if (rc) { | 1347 | if (rc) { |
1298 | kunmap_atomic(paddr); | 1348 | kunmap_atomic(paddr); |
@@ -1309,6 +1359,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1309 | kunmap_atomic(paddr); | 1359 | kunmap_atomic(paddr); |
1310 | kunmap_atomic(daddr); | 1360 | kunmap_atomic(daddr); |
1311 | } | 1361 | } |
1362 | if (!sg) | ||
1363 | return 0; | ||
1364 | |||
1312 | sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); | 1365 | sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); |
1313 | 1366 | ||
1314 | return 0; | 1367 | return 0; |
@@ -1353,7 +1406,7 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1353 | continue; | 1406 | continue; |
1354 | } | 1407 | } |
1355 | 1408 | ||
1356 | rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, | 1409 | rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, |
1357 | ei_lba); | 1410 | ei_lba); |
1358 | if (rc) { | 1411 | if (rc) { |
1359 | kunmap_atomic(paddr); | 1412 | kunmap_atomic(paddr); |
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 6c8bd6bc175c..7912aa124385 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -103,10 +103,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) | |||
103 | buf[5] |= 0x8; | 103 | buf[5] |= 0x8; |
104 | /* | 104 | /* |
105 | * Set Protection (PROTECT) bit when DIF has been enabled on the | 105 | * Set Protection (PROTECT) bit when DIF has been enabled on the |
106 | * device, and the transport supports VERIFY + PASS. | 106 | * device, and the fabric supports VERIFY + PASS. Also report |
107 | * PROTECT=1 if sess_prot_type has been configured to allow T10-PI | ||
108 | * to unprotected devices. | ||
107 | */ | 109 | */ |
108 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 110 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
109 | if (dev->dev_attrib.pi_prot_type) | 111 | if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type) |
110 | buf[5] |= 0x1; | 112 | buf[5] |= 0x1; |
111 | } | 113 | } |
112 | 114 | ||
@@ -467,9 +469,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
467 | * only for TYPE3 protection. | 469 | * only for TYPE3 protection. |
468 | */ | 470 | */ |
469 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 471 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
470 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) | 472 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT || |
473 | cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) | ||
471 | buf[4] = 0x5; | 474 | buf[4] = 0x5; |
472 | else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) | 475 | else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || |
476 | cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) | ||
473 | buf[4] = 0x4; | 477 | buf[4] = 0x4; |
474 | } | 478 | } |
475 | 479 | ||
@@ -861,7 +865,7 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p) | |||
861 | * TAG field. | 865 | * TAG field. |
862 | */ | 866 | */ |
863 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 867 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
864 | if (dev->dev_attrib.pi_prot_type) | 868 | if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type) |
865 | p[5] |= 0x80; | 869 | p[5] |= 0x80; |
866 | } | 870 | } |
867 | 871 | ||
@@ -1099,7 +1103,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) | |||
1099 | unsigned char *buf; | 1103 | unsigned char *buf; |
1100 | unsigned char tbuf[SE_MODE_PAGE_BUF]; | 1104 | unsigned char tbuf[SE_MODE_PAGE_BUF]; |
1101 | int length; | 1105 | int length; |
1102 | int ret = 0; | 1106 | sense_reason_t ret = 0; |
1103 | int i; | 1107 | int i; |
1104 | 1108 | ||
1105 | if (!cmd->data_length) { | 1109 | if (!cmd->data_length) { |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index fa5e157db47b..315ec3458eeb 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -125,8 +125,8 @@ void core_tmr_abort_task( | |||
125 | if (dev != se_cmd->se_dev) | 125 | if (dev != se_cmd->se_dev) |
126 | continue; | 126 | continue; |
127 | 127 | ||
128 | /* skip se_cmd associated with tmr */ | 128 | /* skip task management functions, including tmr->task_cmd */ |
129 | if (tmr->task_cmd == se_cmd) | 129 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) |
130 | continue; | 130 | continue; |
131 | 131 | ||
132 | ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); | 132 | ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 0696de9553d3..47f064415bf6 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -672,7 +672,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | |||
672 | } | 672 | } |
673 | 673 | ||
674 | int core_tpg_register( | 674 | int core_tpg_register( |
675 | struct target_core_fabric_ops *tfo, | 675 | const struct target_core_fabric_ops *tfo, |
676 | struct se_wwn *se_wwn, | 676 | struct se_wwn *se_wwn, |
677 | struct se_portal_group *se_tpg, | 677 | struct se_portal_group *se_tpg, |
678 | void *tpg_fabric_ptr, | 678 | void *tpg_fabric_ptr, |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ac3cbabdbdf0..3fe5cb240b6f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -322,6 +322,7 @@ void __transport_register_session( | |||
322 | struct se_session *se_sess, | 322 | struct se_session *se_sess, |
323 | void *fabric_sess_ptr) | 323 | void *fabric_sess_ptr) |
324 | { | 324 | { |
325 | const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; | ||
325 | unsigned char buf[PR_REG_ISID_LEN]; | 326 | unsigned char buf[PR_REG_ISID_LEN]; |
326 | 327 | ||
327 | se_sess->se_tpg = se_tpg; | 328 | se_sess->se_tpg = se_tpg; |
@@ -334,6 +335,21 @@ void __transport_register_session( | |||
334 | */ | 335 | */ |
335 | if (se_nacl) { | 336 | if (se_nacl) { |
336 | /* | 337 | /* |
338 | * | ||
339 | * Determine if fabric allows for T10-PI feature bits exposed to | ||
340 | * initiators for device backends with !dev->dev_attrib.pi_prot_type. | ||
341 | * | ||
342 | * If so, then always save prot_type on a per se_node_acl node | ||
343 | * basis and re-instate the previous sess_prot_type to avoid | ||
344 | * disabling PI from below any previously initiator side | ||
345 | * registered LUNs. | ||
346 | */ | ||
347 | if (se_nacl->saved_prot_type) | ||
348 | se_sess->sess_prot_type = se_nacl->saved_prot_type; | ||
349 | else if (tfo->tpg_check_prot_fabric_only) | ||
350 | se_sess->sess_prot_type = se_nacl->saved_prot_type = | ||
351 | tfo->tpg_check_prot_fabric_only(se_tpg); | ||
352 | /* | ||
337 | * If the fabric module supports an ISID based TransportID, | 353 | * If the fabric module supports an ISID based TransportID, |
338 | * save this value in binary from the fabric I_T Nexus now. | 354 | * save this value in binary from the fabric I_T Nexus now. |
339 | */ | 355 | */ |
@@ -404,6 +420,30 @@ void target_put_session(struct se_session *se_sess) | |||
404 | } | 420 | } |
405 | EXPORT_SYMBOL(target_put_session); | 421 | EXPORT_SYMBOL(target_put_session); |
406 | 422 | ||
423 | ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) | ||
424 | { | ||
425 | struct se_session *se_sess; | ||
426 | ssize_t len = 0; | ||
427 | |||
428 | spin_lock_bh(&se_tpg->session_lock); | ||
429 | list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { | ||
430 | if (!se_sess->se_node_acl) | ||
431 | continue; | ||
432 | if (!se_sess->se_node_acl->dynamic_node_acl) | ||
433 | continue; | ||
434 | if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) | ||
435 | break; | ||
436 | |||
437 | len += snprintf(page + len, PAGE_SIZE - len, "%s\n", | ||
438 | se_sess->se_node_acl->initiatorname); | ||
439 | len += 1; /* Include NULL terminator */ | ||
440 | } | ||
441 | spin_unlock_bh(&se_tpg->session_lock); | ||
442 | |||
443 | return len; | ||
444 | } | ||
445 | EXPORT_SYMBOL(target_show_dynamic_sessions); | ||
446 | |||
407 | static void target_complete_nacl(struct kref *kref) | 447 | static void target_complete_nacl(struct kref *kref) |
408 | { | 448 | { |
409 | struct se_node_acl *nacl = container_of(kref, | 449 | struct se_node_acl *nacl = container_of(kref, |
@@ -462,7 +502,7 @@ EXPORT_SYMBOL(transport_free_session); | |||
462 | void transport_deregister_session(struct se_session *se_sess) | 502 | void transport_deregister_session(struct se_session *se_sess) |
463 | { | 503 | { |
464 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 504 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
465 | struct target_core_fabric_ops *se_tfo; | 505 | const struct target_core_fabric_ops *se_tfo; |
466 | struct se_node_acl *se_nacl; | 506 | struct se_node_acl *se_nacl; |
467 | unsigned long flags; | 507 | unsigned long flags; |
468 | bool comp_nacl = true; | 508 | bool comp_nacl = true; |
@@ -1118,7 +1158,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) | |||
1118 | */ | 1158 | */ |
1119 | void transport_init_se_cmd( | 1159 | void transport_init_se_cmd( |
1120 | struct se_cmd *cmd, | 1160 | struct se_cmd *cmd, |
1121 | struct target_core_fabric_ops *tfo, | 1161 | const struct target_core_fabric_ops *tfo, |
1122 | struct se_session *se_sess, | 1162 | struct se_session *se_sess, |
1123 | u32 data_length, | 1163 | u32 data_length, |
1124 | int data_direction, | 1164 | int data_direction, |
@@ -1570,6 +1610,8 @@ EXPORT_SYMBOL(target_submit_tmr); | |||
1570 | * has completed. | 1610 | * has completed. |
1571 | */ | 1611 | */ |
1572 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) | 1612 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) |
1613 | __releases(&cmd->t_state_lock) | ||
1614 | __acquires(&cmd->t_state_lock) | ||
1573 | { | 1615 | { |
1574 | bool was_active = false; | 1616 | bool was_active = false; |
1575 | 1617 | ||
@@ -1615,11 +1657,11 @@ void transport_generic_request_failure(struct se_cmd *cmd, | |||
1615 | transport_complete_task_attr(cmd); | 1657 | transport_complete_task_attr(cmd); |
1616 | /* | 1658 | /* |
1617 | * Handle special case for COMPARE_AND_WRITE failure, where the | 1659 | * Handle special case for COMPARE_AND_WRITE failure, where the |
1618 | * callback is expected to drop the per device ->caw_mutex. | 1660 | * callback is expected to drop the per device ->caw_sem. |
1619 | */ | 1661 | */ |
1620 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | 1662 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
1621 | cmd->transport_complete_callback) | 1663 | cmd->transport_complete_callback) |
1622 | cmd->transport_complete_callback(cmd); | 1664 | cmd->transport_complete_callback(cmd, false); |
1623 | 1665 | ||
1624 | switch (sense_reason) { | 1666 | switch (sense_reason) { |
1625 | case TCM_NON_EXISTENT_LUN: | 1667 | case TCM_NON_EXISTENT_LUN: |
@@ -1706,6 +1748,41 @@ void __target_execute_cmd(struct se_cmd *cmd) | |||
1706 | } | 1748 | } |
1707 | } | 1749 | } |
1708 | 1750 | ||
1751 | static int target_write_prot_action(struct se_cmd *cmd) | ||
1752 | { | ||
1753 | u32 sectors; | ||
1754 | /* | ||
1755 | * Perform WRITE_INSERT of PI using software emulation when backend | ||
1756 | * device has PI enabled, if the transport has not already generated | ||
1757 | * PI using hardware WRITE_INSERT offload. | ||
1758 | */ | ||
1759 | switch (cmd->prot_op) { | ||
1760 | case TARGET_PROT_DOUT_INSERT: | ||
1761 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) | ||
1762 | sbc_dif_generate(cmd); | ||
1763 | break; | ||
1764 | case TARGET_PROT_DOUT_STRIP: | ||
1765 | if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) | ||
1766 | break; | ||
1767 | |||
1768 | sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); | ||
1769 | cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba, | ||
1770 | sectors, 0, NULL, 0); | ||
1771 | if (unlikely(cmd->pi_err)) { | ||
1772 | spin_lock_irq(&cmd->t_state_lock); | ||
1773 | cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; | ||
1774 | spin_unlock_irq(&cmd->t_state_lock); | ||
1775 | transport_generic_request_failure(cmd, cmd->pi_err); | ||
1776 | return -1; | ||
1777 | } | ||
1778 | break; | ||
1779 | default: | ||
1780 | break; | ||
1781 | } | ||
1782 | |||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1709 | static bool target_handle_task_attr(struct se_cmd *cmd) | 1786 | static bool target_handle_task_attr(struct se_cmd *cmd) |
1710 | { | 1787 | { |
1711 | struct se_device *dev = cmd->se_dev; | 1788 | struct se_device *dev = cmd->se_dev; |
@@ -1785,15 +1862,9 @@ void target_execute_cmd(struct se_cmd *cmd) | |||
1785 | cmd->t_state = TRANSPORT_PROCESSING; | 1862 | cmd->t_state = TRANSPORT_PROCESSING; |
1786 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; | 1863 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; |
1787 | spin_unlock_irq(&cmd->t_state_lock); | 1864 | spin_unlock_irq(&cmd->t_state_lock); |
1788 | /* | 1865 | |
1789 | * Perform WRITE_INSERT of PI using software emulation when backend | 1866 | if (target_write_prot_action(cmd)) |
1790 | * device has PI enabled, if the transport has not already generated | 1867 | return; |
1791 | * PI using hardware WRITE_INSERT offload. | ||
1792 | */ | ||
1793 | if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) { | ||
1794 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) | ||
1795 | sbc_dif_generate(cmd); | ||
1796 | } | ||
1797 | 1868 | ||
1798 | if (target_handle_task_attr(cmd)) { | 1869 | if (target_handle_task_attr(cmd)) { |
1799 | spin_lock_irq(&cmd->t_state_lock); | 1870 | spin_lock_irq(&cmd->t_state_lock); |
@@ -1919,16 +1990,28 @@ static void transport_handle_queue_full( | |||
1919 | schedule_work(&cmd->se_dev->qf_work_queue); | 1990 | schedule_work(&cmd->se_dev->qf_work_queue); |
1920 | } | 1991 | } |
1921 | 1992 | ||
1922 | static bool target_check_read_strip(struct se_cmd *cmd) | 1993 | static bool target_read_prot_action(struct se_cmd *cmd) |
1923 | { | 1994 | { |
1924 | sense_reason_t rc; | 1995 | sense_reason_t rc; |
1925 | 1996 | ||
1926 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { | 1997 | switch (cmd->prot_op) { |
1927 | rc = sbc_dif_read_strip(cmd); | 1998 | case TARGET_PROT_DIN_STRIP: |
1928 | if (rc) { | 1999 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { |
1929 | cmd->pi_err = rc; | 2000 | rc = sbc_dif_read_strip(cmd); |
1930 | return true; | 2001 | if (rc) { |
2002 | cmd->pi_err = rc; | ||
2003 | return true; | ||
2004 | } | ||
1931 | } | 2005 | } |
2006 | break; | ||
2007 | case TARGET_PROT_DIN_INSERT: | ||
2008 | if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) | ||
2009 | break; | ||
2010 | |||
2011 | sbc_dif_generate(cmd); | ||
2012 | break; | ||
2013 | default: | ||
2014 | break; | ||
1932 | } | 2015 | } |
1933 | 2016 | ||
1934 | return false; | 2017 | return false; |
@@ -1975,8 +2058,12 @@ static void target_complete_ok_work(struct work_struct *work) | |||
1975 | if (cmd->transport_complete_callback) { | 2058 | if (cmd->transport_complete_callback) { |
1976 | sense_reason_t rc; | 2059 | sense_reason_t rc; |
1977 | 2060 | ||
1978 | rc = cmd->transport_complete_callback(cmd); | 2061 | rc = cmd->transport_complete_callback(cmd, true); |
1979 | if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { | 2062 | if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { |
2063 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | ||
2064 | !cmd->data_length) | ||
2065 | goto queue_rsp; | ||
2066 | |||
1980 | return; | 2067 | return; |
1981 | } else if (rc) { | 2068 | } else if (rc) { |
1982 | ret = transport_send_check_condition_and_sense(cmd, | 2069 | ret = transport_send_check_condition_and_sense(cmd, |
@@ -1990,6 +2077,7 @@ static void target_complete_ok_work(struct work_struct *work) | |||
1990 | } | 2077 | } |
1991 | } | 2078 | } |
1992 | 2079 | ||
2080 | queue_rsp: | ||
1993 | switch (cmd->data_direction) { | 2081 | switch (cmd->data_direction) { |
1994 | case DMA_FROM_DEVICE: | 2082 | case DMA_FROM_DEVICE: |
1995 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2083 | spin_lock(&cmd->se_lun->lun_sep_lock); |
@@ -2003,8 +2091,7 @@ static void target_complete_ok_work(struct work_struct *work) | |||
2003 | * backend had PI enabled, if the transport will not be | 2091 | * backend had PI enabled, if the transport will not be |
2004 | * performing hardware READ_STRIP offload. | 2092 | * performing hardware READ_STRIP offload. |
2005 | */ | 2093 | */ |
2006 | if (cmd->prot_op == TARGET_PROT_DIN_STRIP && | 2094 | if (target_read_prot_action(cmd)) { |
2007 | target_check_read_strip(cmd)) { | ||
2008 | ret = transport_send_check_condition_and_sense(cmd, | 2095 | ret = transport_send_check_condition_and_sense(cmd, |
2009 | cmd->pi_err, 0); | 2096 | cmd->pi_err, 0); |
2010 | if (ret == -EAGAIN || ret == -ENOMEM) | 2097 | if (ret == -EAGAIN || ret == -ENOMEM) |
@@ -2094,6 +2181,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd) | |||
2094 | static inline void transport_free_pages(struct se_cmd *cmd) | 2181 | static inline void transport_free_pages(struct se_cmd *cmd) |
2095 | { | 2182 | { |
2096 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { | 2183 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { |
2184 | /* | ||
2185 | * Release special case READ buffer payload required for | ||
2186 | * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE | ||
2187 | */ | ||
2188 | if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { | ||
2189 | transport_free_sgl(cmd->t_bidi_data_sg, | ||
2190 | cmd->t_bidi_data_nents); | ||
2191 | cmd->t_bidi_data_sg = NULL; | ||
2192 | cmd->t_bidi_data_nents = 0; | ||
2193 | } | ||
2097 | transport_reset_sgl_orig(cmd); | 2194 | transport_reset_sgl_orig(cmd); |
2098 | return; | 2195 | return; |
2099 | } | 2196 | } |
@@ -2246,6 +2343,7 @@ sense_reason_t | |||
2246 | transport_generic_new_cmd(struct se_cmd *cmd) | 2343 | transport_generic_new_cmd(struct se_cmd *cmd) |
2247 | { | 2344 | { |
2248 | int ret = 0; | 2345 | int ret = 0; |
2346 | bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); | ||
2249 | 2347 | ||
2250 | /* | 2348 | /* |
2251 | * Determine is the TCM fabric module has already allocated physical | 2349 | * Determine is the TCM fabric module has already allocated physical |
@@ -2254,7 +2352,6 @@ transport_generic_new_cmd(struct se_cmd *cmd) | |||
2254 | */ | 2352 | */ |
2255 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && | 2353 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
2256 | cmd->data_length) { | 2354 | cmd->data_length) { |
2257 | bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); | ||
2258 | 2355 | ||
2259 | if ((cmd->se_cmd_flags & SCF_BIDI) || | 2356 | if ((cmd->se_cmd_flags & SCF_BIDI) || |
2260 | (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { | 2357 | (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { |
@@ -2285,6 +2382,20 @@ transport_generic_new_cmd(struct se_cmd *cmd) | |||
2285 | cmd->data_length, zero_flag); | 2382 | cmd->data_length, zero_flag); |
2286 | if (ret < 0) | 2383 | if (ret < 0) |
2287 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2384 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2385 | } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | ||
2386 | cmd->data_length) { | ||
2387 | /* | ||
2388 | * Special case for COMPARE_AND_WRITE with fabrics | ||
2389 | * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. | ||
2390 | */ | ||
2391 | u32 caw_length = cmd->t_task_nolb * | ||
2392 | cmd->se_dev->dev_attrib.block_size; | ||
2393 | |||
2394 | ret = target_alloc_sgl(&cmd->t_bidi_data_sg, | ||
2395 | &cmd->t_bidi_data_nents, | ||
2396 | caw_length, zero_flag); | ||
2397 | if (ret < 0) | ||
2398 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
2288 | } | 2399 | } |
2289 | /* | 2400 | /* |
2290 | * If this command is not a write we can execute it right here, | 2401 | * If this command is not a write we can execute it right here, |
@@ -2376,10 +2487,8 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
2376 | * fabric acknowledgement that requires two target_put_sess_cmd() | 2487 | * fabric acknowledgement that requires two target_put_sess_cmd() |
2377 | * invocations before se_cmd descriptor release. | 2488 | * invocations before se_cmd descriptor release. |
2378 | */ | 2489 | */ |
2379 | if (ack_kref) { | 2490 | if (ack_kref) |
2380 | kref_get(&se_cmd->cmd_kref); | 2491 | kref_get(&se_cmd->cmd_kref); |
2381 | se_cmd->se_cmd_flags |= SCF_ACK_KREF; | ||
2382 | } | ||
2383 | 2492 | ||
2384 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2493 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2385 | if (se_sess->sess_tearing_down) { | 2494 | if (se_sess->sess_tearing_down) { |
@@ -2398,6 +2507,7 @@ out: | |||
2398 | EXPORT_SYMBOL(target_get_sess_cmd); | 2507 | EXPORT_SYMBOL(target_get_sess_cmd); |
2399 | 2508 | ||
2400 | static void target_release_cmd_kref(struct kref *kref) | 2509 | static void target_release_cmd_kref(struct kref *kref) |
2510 | __releases(&se_cmd->se_sess->sess_cmd_lock) | ||
2401 | { | 2511 | { |
2402 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2512 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
2403 | struct se_session *se_sess = se_cmd->se_sess; | 2513 | struct se_session *se_sess = se_cmd->se_sess; |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1a1bcf71ec9d..dbc872a6c981 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -344,8 +344,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
344 | 344 | ||
345 | entry = (void *) mb + CMDR_OFF + cmd_head; | 345 | entry = (void *) mb + CMDR_OFF + cmd_head; |
346 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 346 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
347 | tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD); | 347 | tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); |
348 | tcmu_hdr_set_len(&entry->hdr, pad_size); | 348 | tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); |
349 | entry->hdr.cmd_id = 0; /* not used for PAD */ | ||
350 | entry->hdr.kflags = 0; | ||
351 | entry->hdr.uflags = 0; | ||
349 | 352 | ||
350 | UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); | 353 | UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); |
351 | 354 | ||
@@ -355,9 +358,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
355 | 358 | ||
356 | entry = (void *) mb + CMDR_OFF + cmd_head; | 359 | entry = (void *) mb + CMDR_OFF + cmd_head; |
357 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 360 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
358 | tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD); | 361 | tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); |
359 | tcmu_hdr_set_len(&entry->hdr, command_size); | 362 | tcmu_hdr_set_len(&entry->hdr.len_op, command_size); |
360 | entry->cmd_id = tcmu_cmd->cmd_id; | 363 | entry->hdr.cmd_id = tcmu_cmd->cmd_id; |
364 | entry->hdr.kflags = 0; | ||
365 | entry->hdr.uflags = 0; | ||
361 | 366 | ||
362 | /* | 367 | /* |
363 | * Fix up iovecs, and handle if allocation in data ring wrapped. | 368 | * Fix up iovecs, and handle if allocation in data ring wrapped. |
@@ -376,7 +381,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
376 | 381 | ||
377 | /* Even iov_base is relative to mb_addr */ | 382 | /* Even iov_base is relative to mb_addr */ |
378 | iov->iov_len = copy_bytes; | 383 | iov->iov_len = copy_bytes; |
379 | iov->iov_base = (void *) udev->data_off + udev->data_head; | 384 | iov->iov_base = (void __user *) udev->data_off + |
385 | udev->data_head; | ||
380 | iov_cnt++; | 386 | iov_cnt++; |
381 | iov++; | 387 | iov++; |
382 | 388 | ||
@@ -388,7 +394,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
388 | copy_bytes = sg->length - copy_bytes; | 394 | copy_bytes = sg->length - copy_bytes; |
389 | 395 | ||
390 | iov->iov_len = copy_bytes; | 396 | iov->iov_len = copy_bytes; |
391 | iov->iov_base = (void *) udev->data_off + udev->data_head; | 397 | iov->iov_base = (void __user *) udev->data_off + |
398 | udev->data_head; | ||
392 | 399 | ||
393 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | 400 | if (se_cmd->data_direction == DMA_TO_DEVICE) { |
394 | to = (void *) mb + udev->data_off + udev->data_head; | 401 | to = (void *) mb + udev->data_off + udev->data_head; |
@@ -405,6 +412,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
405 | kunmap_atomic(from); | 412 | kunmap_atomic(from); |
406 | } | 413 | } |
407 | entry->req.iov_cnt = iov_cnt; | 414 | entry->req.iov_cnt = iov_cnt; |
415 | entry->req.iov_bidi_cnt = 0; | ||
416 | entry->req.iov_dif_cnt = 0; | ||
408 | 417 | ||
409 | /* All offsets relative to mb_addr, not start of entry! */ | 418 | /* All offsets relative to mb_addr, not start of entry! */ |
410 | cdb_off = CMDR_OFF + cmd_head + base_command_size; | 419 | cdb_off = CMDR_OFF + cmd_head + base_command_size; |
@@ -462,6 +471,17 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * | |||
462 | return; | 471 | return; |
463 | } | 472 | } |
464 | 473 | ||
474 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { | ||
475 | UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); | ||
476 | pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", | ||
477 | cmd->se_cmd); | ||
478 | transport_generic_request_failure(cmd->se_cmd, | ||
479 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); | ||
480 | cmd->se_cmd = NULL; | ||
481 | kmem_cache_free(tcmu_cmd_cache, cmd); | ||
482 | return; | ||
483 | } | ||
484 | |||
465 | if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { | 485 | if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { |
466 | memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, | 486 | memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, |
467 | se_cmd->scsi_sense_length); | 487 | se_cmd->scsi_sense_length); |
@@ -540,14 +560,16 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
540 | 560 | ||
541 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 561 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
542 | 562 | ||
543 | if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) { | 563 | if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { |
544 | UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); | 564 | UPDATE_HEAD(udev->cmdr_last_cleaned, |
565 | tcmu_hdr_get_len(entry->hdr.len_op), | ||
566 | udev->cmdr_size); | ||
545 | continue; | 567 | continue; |
546 | } | 568 | } |
547 | WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD); | 569 | WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); |
548 | 570 | ||
549 | spin_lock(&udev->commands_lock); | 571 | spin_lock(&udev->commands_lock); |
550 | cmd = idr_find(&udev->commands, entry->cmd_id); | 572 | cmd = idr_find(&udev->commands, entry->hdr.cmd_id); |
551 | if (cmd) | 573 | if (cmd) |
552 | idr_remove(&udev->commands, cmd->cmd_id); | 574 | idr_remove(&udev->commands, cmd->cmd_id); |
553 | spin_unlock(&udev->commands_lock); | 575 | spin_unlock(&udev->commands_lock); |
@@ -560,7 +582,9 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
560 | 582 | ||
561 | tcmu_handle_completion(cmd, entry); | 583 | tcmu_handle_completion(cmd, entry); |
562 | 584 | ||
563 | UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); | 585 | UPDATE_HEAD(udev->cmdr_last_cleaned, |
586 | tcmu_hdr_get_len(entry->hdr.len_op), | ||
587 | udev->cmdr_size); | ||
564 | 588 | ||
565 | handled++; | 589 | handled++; |
566 | } | 590 | } |
@@ -838,14 +862,14 @@ static int tcmu_configure_device(struct se_device *dev) | |||
838 | udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; | 862 | udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; |
839 | 863 | ||
840 | mb = udev->mb_addr; | 864 | mb = udev->mb_addr; |
841 | mb->version = 1; | 865 | mb->version = TCMU_MAILBOX_VERSION; |
842 | mb->cmdr_off = CMDR_OFF; | 866 | mb->cmdr_off = CMDR_OFF; |
843 | mb->cmdr_size = udev->cmdr_size; | 867 | mb->cmdr_size = udev->cmdr_size; |
844 | 868 | ||
845 | WARN_ON(!PAGE_ALIGNED(udev->data_off)); | 869 | WARN_ON(!PAGE_ALIGNED(udev->data_off)); |
846 | WARN_ON(udev->data_size % PAGE_SIZE); | 870 | WARN_ON(udev->data_size % PAGE_SIZE); |
847 | 871 | ||
848 | info->version = "1"; | 872 | info->version = xstr(TCMU_MAILBOX_VERSION); |
849 | 873 | ||
850 | info->mem[0].name = "tcm-user command & data buffer"; | 874 | info->mem[0].name = "tcm-user command & data buffer"; |
851 | info->mem[0].addr = (phys_addr_t) udev->mb_addr; | 875 | info->mem[0].addr = (phys_addr_t) udev->mb_addr; |
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 33ac39bf75e5..a600ff15dcfd 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
@@ -34,20 +34,12 @@ | |||
34 | #include <target/target_core_fabric.h> | 34 | #include <target/target_core_fabric.h> |
35 | #include <target/target_core_configfs.h> | 35 | #include <target/target_core_configfs.h> |
36 | 36 | ||
37 | #include "target_core_internal.h" | ||
37 | #include "target_core_pr.h" | 38 | #include "target_core_pr.h" |
38 | #include "target_core_ua.h" | 39 | #include "target_core_ua.h" |
39 | #include "target_core_xcopy.h" | 40 | #include "target_core_xcopy.h" |
40 | 41 | ||
41 | static struct workqueue_struct *xcopy_wq = NULL; | 42 | static struct workqueue_struct *xcopy_wq = NULL; |
42 | /* | ||
43 | * From target_core_device.c | ||
44 | */ | ||
45 | extern struct mutex g_device_mutex; | ||
46 | extern struct list_head g_device_list; | ||
47 | /* | ||
48 | * From target_core_configfs.c | ||
49 | */ | ||
50 | extern struct configfs_subsystem *target_core_subsystem[]; | ||
51 | 43 | ||
52 | static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) | 44 | static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) |
53 | { | 45 | { |
@@ -433,7 +425,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd) | |||
433 | return 0; | 425 | return 0; |
434 | } | 426 | } |
435 | 427 | ||
436 | static struct target_core_fabric_ops xcopy_pt_tfo = { | 428 | static const struct target_core_fabric_ops xcopy_pt_tfo = { |
437 | .get_fabric_name = xcopy_pt_get_fabric_name, | 429 | .get_fabric_name = xcopy_pt_get_fabric_name, |
438 | .get_task_tag = xcopy_pt_get_tag, | 430 | .get_task_tag = xcopy_pt_get_tag, |
439 | .get_cmd_state = xcopy_pt_get_cmd_state, | 431 | .get_cmd_state = xcopy_pt_get_cmd_state, |
@@ -548,33 +540,22 @@ static void target_xcopy_setup_pt_port( | |||
548 | } | 540 | } |
549 | } | 541 | } |
550 | 542 | ||
551 | static int target_xcopy_init_pt_lun( | 543 | static void target_xcopy_init_pt_lun(struct se_device *se_dev, |
552 | struct xcopy_pt_cmd *xpt_cmd, | 544 | struct se_cmd *pt_cmd, bool remote_port) |
553 | struct xcopy_op *xop, | ||
554 | struct se_device *se_dev, | ||
555 | struct se_cmd *pt_cmd, | ||
556 | bool remote_port) | ||
557 | { | 545 | { |
558 | /* | 546 | /* |
559 | * Don't allocate + init an pt_cmd->se_lun if honoring local port for | 547 | * Don't allocate + init an pt_cmd->se_lun if honoring local port for |
560 | * reservations. The pt_cmd->se_lun pointer will be setup from within | 548 | * reservations. The pt_cmd->se_lun pointer will be setup from within |
561 | * target_xcopy_setup_pt_port() | 549 | * target_xcopy_setup_pt_port() |
562 | */ | 550 | */ |
563 | if (!remote_port) { | 551 | if (remote_port) { |
564 | pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; | 552 | pr_debug("Setup emulated se_dev: %p from se_dev\n", |
565 | return 0; | 553 | pt_cmd->se_dev); |
554 | pt_cmd->se_lun = &se_dev->xcopy_lun; | ||
555 | pt_cmd->se_dev = se_dev; | ||
566 | } | 556 | } |
567 | 557 | ||
568 | pt_cmd->se_lun = &se_dev->xcopy_lun; | 558 | pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
569 | pt_cmd->se_dev = se_dev; | ||
570 | |||
571 | pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); | ||
572 | pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; | ||
573 | |||
574 | pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", | ||
575 | pt_cmd->se_lun->lun_se_dev); | ||
576 | |||
577 | return 0; | ||
578 | } | 559 | } |
579 | 560 | ||
580 | static int target_xcopy_setup_pt_cmd( | 561 | static int target_xcopy_setup_pt_cmd( |
@@ -592,11 +573,8 @@ static int target_xcopy_setup_pt_cmd( | |||
592 | * Setup LUN+port to honor reservations based upon xop->op_origin for | 573 | * Setup LUN+port to honor reservations based upon xop->op_origin for |
593 | * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. | 574 | * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. |
594 | */ | 575 | */ |
595 | rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port); | 576 | target_xcopy_init_pt_lun(se_dev, cmd, remote_port); |
596 | if (rc < 0) { | 577 | |
597 | ret = rc; | ||
598 | goto out; | ||
599 | } | ||
600 | xpt_cmd->xcopy_op = xop; | 578 | xpt_cmd->xcopy_op = xop; |
601 | target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); | 579 | target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); |
602 | 580 | ||
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index a0bcfd3e7e7d..881deb3d499a 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -129,7 +129,6 @@ struct ft_cmd { | |||
129 | 129 | ||
130 | extern struct mutex ft_lport_lock; | 130 | extern struct mutex ft_lport_lock; |
131 | extern struct fc4_prov ft_prov; | 131 | extern struct fc4_prov ft_prov; |
132 | extern struct target_fabric_configfs *ft_configfs; | ||
133 | extern unsigned int ft_debug_logging; | 132 | extern unsigned int ft_debug_logging; |
134 | 133 | ||
135 | /* | 134 | /* |
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index efdcb9663a1a..65dce1345966 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | #include "tcm_fc.h" | 49 | #include "tcm_fc.h" |
50 | 50 | ||
51 | struct target_fabric_configfs *ft_configfs; | 51 | static const struct target_core_fabric_ops ft_fabric_ops; |
52 | 52 | ||
53 | static LIST_HEAD(ft_wwn_list); | 53 | static LIST_HEAD(ft_wwn_list); |
54 | DEFINE_MUTEX(ft_lport_lock); | 54 | DEFINE_MUTEX(ft_lport_lock); |
@@ -337,7 +337,7 @@ static struct se_portal_group *ft_add_tpg( | |||
337 | return NULL; | 337 | return NULL; |
338 | } | 338 | } |
339 | 339 | ||
340 | ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, | 340 | ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg, |
341 | tpg, TRANSPORT_TPG_TYPE_NORMAL); | 341 | tpg, TRANSPORT_TPG_TYPE_NORMAL); |
342 | if (ret < 0) { | 342 | if (ret < 0) { |
343 | destroy_workqueue(wq); | 343 | destroy_workqueue(wq); |
@@ -507,7 +507,9 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) | |||
507 | return tpg->index; | 507 | return tpg->index; |
508 | } | 508 | } |
509 | 509 | ||
510 | static struct target_core_fabric_ops ft_fabric_ops = { | 510 | static const struct target_core_fabric_ops ft_fabric_ops = { |
511 | .module = THIS_MODULE, | ||
512 | .name = "fc", | ||
511 | .get_fabric_name = ft_get_fabric_name, | 513 | .get_fabric_name = ft_get_fabric_name, |
512 | .get_fabric_proto_ident = fc_get_fabric_proto_ident, | 514 | .get_fabric_proto_ident = fc_get_fabric_proto_ident, |
513 | .tpg_get_wwn = ft_get_fabric_wwn, | 515 | .tpg_get_wwn = ft_get_fabric_wwn, |
@@ -552,62 +554,10 @@ static struct target_core_fabric_ops ft_fabric_ops = { | |||
552 | .fabric_drop_np = NULL, | 554 | .fabric_drop_np = NULL, |
553 | .fabric_make_nodeacl = &ft_add_acl, | 555 | .fabric_make_nodeacl = &ft_add_acl, |
554 | .fabric_drop_nodeacl = &ft_del_acl, | 556 | .fabric_drop_nodeacl = &ft_del_acl, |
555 | }; | ||
556 | |||
557 | static int ft_register_configfs(void) | ||
558 | { | ||
559 | struct target_fabric_configfs *fabric; | ||
560 | int ret; | ||
561 | |||
562 | /* | ||
563 | * Register the top level struct config_item_type with TCM core | ||
564 | */ | ||
565 | fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); | ||
566 | if (IS_ERR(fabric)) { | ||
567 | pr_err("%s: target_fabric_configfs_init() failed!\n", | ||
568 | __func__); | ||
569 | return PTR_ERR(fabric); | ||
570 | } | ||
571 | fabric->tf_ops = ft_fabric_ops; | ||
572 | |||
573 | /* | ||
574 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
575 | */ | ||
576 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs; | ||
577 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; | ||
578 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
579 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
580 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
581 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = | ||
582 | ft_nacl_base_attrs; | ||
583 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
584 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
585 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
586 | /* | ||
587 | * register the fabric for use within TCM | ||
588 | */ | ||
589 | ret = target_fabric_configfs_register(fabric); | ||
590 | if (ret < 0) { | ||
591 | pr_debug("target_fabric_configfs_register() for" | ||
592 | " FC Target failed!\n"); | ||
593 | target_fabric_configfs_free(fabric); | ||
594 | return -1; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * Setup our local pointer to *fabric. | ||
599 | */ | ||
600 | ft_configfs = fabric; | ||
601 | return 0; | ||
602 | } | ||
603 | 557 | ||
604 | static void ft_deregister_configfs(void) | 558 | .tfc_wwn_attrs = ft_wwn_attrs, |
605 | { | 559 | .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, |
606 | if (!ft_configfs) | 560 | }; |
607 | return; | ||
608 | target_fabric_configfs_deregister(ft_configfs); | ||
609 | ft_configfs = NULL; | ||
610 | } | ||
611 | 561 | ||
612 | static struct notifier_block ft_notifier = { | 562 | static struct notifier_block ft_notifier = { |
613 | .notifier_call = ft_lport_notify | 563 | .notifier_call = ft_lport_notify |
@@ -615,15 +565,24 @@ static struct notifier_block ft_notifier = { | |||
615 | 565 | ||
616 | static int __init ft_init(void) | 566 | static int __init ft_init(void) |
617 | { | 567 | { |
618 | if (ft_register_configfs()) | 568 | int ret; |
619 | return -1; | 569 | |
620 | if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) { | 570 | ret = target_register_template(&ft_fabric_ops); |
621 | ft_deregister_configfs(); | 571 | if (ret) |
622 | return -1; | 572 | goto out; |
623 | } | 573 | |
574 | ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov); | ||
575 | if (ret) | ||
576 | goto out_unregister_template; | ||
577 | |||
624 | blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); | 578 | blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); |
625 | fc_lport_iterate(ft_lport_add, NULL); | 579 | fc_lport_iterate(ft_lport_add, NULL); |
626 | return 0; | 580 | return 0; |
581 | |||
582 | out_unregister_template: | ||
583 | target_unregister_template(&ft_fabric_ops); | ||
584 | out: | ||
585 | return ret; | ||
627 | } | 586 | } |
628 | 587 | ||
629 | static void __exit ft_exit(void) | 588 | static void __exit ft_exit(void) |
@@ -632,7 +591,7 @@ static void __exit ft_exit(void) | |||
632 | &ft_notifier); | 591 | &ft_notifier); |
633 | fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); | 592 | fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); |
634 | fc_lport_iterate(ft_lport_del, NULL); | 593 | fc_lport_iterate(ft_lport_del, NULL); |
635 | ft_deregister_configfs(); | 594 | target_unregister_template(&ft_fabric_ops); |
636 | synchronize_rcu(); | 595 | synchronize_rcu(); |
637 | } | 596 | } |
638 | 597 | ||
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 6e0a019aad54..8b80addc4ce6 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | USB_GADGET_COMPOSITE_OPTIONS(); | 30 | USB_GADGET_COMPOSITE_OPTIONS(); |
31 | 31 | ||
32 | static struct target_fabric_configfs *usbg_fabric_configfs; | 32 | static const struct target_core_fabric_ops usbg_ops; |
33 | 33 | ||
34 | static inline struct f_uas *to_f_uas(struct usb_function *f) | 34 | static inline struct f_uas *to_f_uas(struct usb_function *f) |
35 | { | 35 | { |
@@ -1572,8 +1572,7 @@ static struct se_portal_group *usbg_make_tpg( | |||
1572 | tpg->tport = tport; | 1572 | tpg->tport = tport; |
1573 | tpg->tport_tpgt = tpgt; | 1573 | tpg->tport_tpgt = tpgt; |
1574 | 1574 | ||
1575 | ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn, | 1575 | ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg, |
1576 | &tpg->se_tpg, tpg, | ||
1577 | TRANSPORT_TPG_TYPE_NORMAL); | 1576 | TRANSPORT_TPG_TYPE_NORMAL); |
1578 | if (ret < 0) { | 1577 | if (ret < 0) { |
1579 | destroy_workqueue(tpg->workqueue); | 1578 | destroy_workqueue(tpg->workqueue); |
@@ -1864,7 +1863,9 @@ static int usbg_check_stop_free(struct se_cmd *se_cmd) | |||
1864 | return 1; | 1863 | return 1; |
1865 | } | 1864 | } |
1866 | 1865 | ||
1867 | static struct target_core_fabric_ops usbg_ops = { | 1866 | static const struct target_core_fabric_ops usbg_ops = { |
1867 | .module = THIS_MODULE, | ||
1868 | .name = "usb_gadget", | ||
1868 | .get_fabric_name = usbg_get_fabric_name, | 1869 | .get_fabric_name = usbg_get_fabric_name, |
1869 | .get_fabric_proto_ident = usbg_get_fabric_proto_ident, | 1870 | .get_fabric_proto_ident = usbg_get_fabric_proto_ident, |
1870 | .tpg_get_wwn = usbg_get_fabric_wwn, | 1871 | .tpg_get_wwn = usbg_get_fabric_wwn, |
@@ -1906,46 +1907,9 @@ static struct target_core_fabric_ops usbg_ops = { | |||
1906 | .fabric_drop_np = NULL, | 1907 | .fabric_drop_np = NULL, |
1907 | .fabric_make_nodeacl = usbg_make_nodeacl, | 1908 | .fabric_make_nodeacl = usbg_make_nodeacl, |
1908 | .fabric_drop_nodeacl = usbg_drop_nodeacl, | 1909 | .fabric_drop_nodeacl = usbg_drop_nodeacl, |
1909 | }; | ||
1910 | |||
1911 | static int usbg_register_configfs(void) | ||
1912 | { | ||
1913 | struct target_fabric_configfs *fabric; | ||
1914 | int ret; | ||
1915 | |||
1916 | fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget"); | ||
1917 | if (IS_ERR(fabric)) { | ||
1918 | printk(KERN_ERR "target_fabric_configfs_init() failed\n"); | ||
1919 | return PTR_ERR(fabric); | ||
1920 | } | ||
1921 | |||
1922 | fabric->tf_ops = usbg_ops; | ||
1923 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; | ||
1924 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; | ||
1925 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
1926 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
1927 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1928 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
1929 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
1930 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
1931 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
1932 | ret = target_fabric_configfs_register(fabric); | ||
1933 | if (ret < 0) { | ||
1934 | printk(KERN_ERR "target_fabric_configfs_register() failed" | ||
1935 | " for usb-gadget\n"); | ||
1936 | return ret; | ||
1937 | } | ||
1938 | usbg_fabric_configfs = fabric; | ||
1939 | return 0; | ||
1940 | }; | ||
1941 | 1910 | ||
1942 | static void usbg_deregister_configfs(void) | 1911 | .tfc_wwn_attrs = usbg_wwn_attrs, |
1943 | { | 1912 | .tfc_tpg_base_attrs = usbg_base_attrs, |
1944 | if (!(usbg_fabric_configfs)) | ||
1945 | return; | ||
1946 | |||
1947 | target_fabric_configfs_deregister(usbg_fabric_configfs); | ||
1948 | usbg_fabric_configfs = NULL; | ||
1949 | }; | 1913 | }; |
1950 | 1914 | ||
1951 | /* Start gadget.c code */ | 1915 | /* Start gadget.c code */ |
@@ -2454,16 +2418,13 @@ static void usbg_detach(struct usbg_tpg *tpg) | |||
2454 | 2418 | ||
2455 | static int __init usb_target_gadget_init(void) | 2419 | static int __init usb_target_gadget_init(void) |
2456 | { | 2420 | { |
2457 | int ret; | 2421 | return target_register_template(&usbg_ops); |
2458 | |||
2459 | ret = usbg_register_configfs(); | ||
2460 | return ret; | ||
2461 | } | 2422 | } |
2462 | module_init(usb_target_gadget_init); | 2423 | module_init(usb_target_gadget_init); |
2463 | 2424 | ||
2464 | static void __exit usb_target_gadget_exit(void) | 2425 | static void __exit usb_target_gadget_exit(void) |
2465 | { | 2426 | { |
2466 | usbg_deregister_configfs(); | 2427 | target_unregister_template(&usbg_ops); |
2467 | } | 2428 | } |
2468 | module_exit(usb_target_gadget_exit); | 2429 | module_exit(usb_target_gadget_exit); |
2469 | 2430 | ||
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 71df240a467a..5e19bb53b3a9 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -131,6 +131,8 @@ struct vhost_scsi_tpg { | |||
131 | int tv_tpg_port_count; | 131 | int tv_tpg_port_count; |
132 | /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ | 132 | /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ |
133 | int tv_tpg_vhost_count; | 133 | int tv_tpg_vhost_count; |
134 | /* Used for enabling T10-PI with legacy devices */ | ||
135 | int tv_fabric_prot_type; | ||
134 | /* list for vhost_scsi_list */ | 136 | /* list for vhost_scsi_list */ |
135 | struct list_head tv_tpg_list; | 137 | struct list_head tv_tpg_list; |
136 | /* Used to protect access for tpg_nexus */ | 138 | /* Used to protect access for tpg_nexus */ |
@@ -214,9 +216,7 @@ struct vhost_scsi { | |||
214 | int vs_events_nr; /* num of pending events, protected by vq->mutex */ | 216 | int vs_events_nr; /* num of pending events, protected by vq->mutex */ |
215 | }; | 217 | }; |
216 | 218 | ||
217 | /* Local pointer to allocated TCM configfs fabric module */ | 219 | static struct target_core_fabric_ops vhost_scsi_ops; |
218 | static struct target_fabric_configfs *vhost_scsi_fabric_configfs; | ||
219 | |||
220 | static struct workqueue_struct *vhost_scsi_workqueue; | 220 | static struct workqueue_struct *vhost_scsi_workqueue; |
221 | 221 | ||
222 | /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ | 222 | /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ |
@@ -431,6 +431,14 @@ vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | |||
431 | port_nexus_ptr); | 431 | port_nexus_ptr); |
432 | } | 432 | } |
433 | 433 | ||
434 | static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) | ||
435 | { | ||
436 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
437 | struct vhost_scsi_tpg, se_tpg); | ||
438 | |||
439 | return tpg->tv_fabric_prot_type; | ||
440 | } | ||
441 | |||
434 | static struct se_node_acl * | 442 | static struct se_node_acl * |
435 | vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg) | 443 | vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg) |
436 | { | 444 | { |
@@ -1878,6 +1886,45 @@ static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, | |||
1878 | } | 1886 | } |
1879 | } | 1887 | } |
1880 | 1888 | ||
1889 | static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type( | ||
1890 | struct se_portal_group *se_tpg, | ||
1891 | const char *page, | ||
1892 | size_t count) | ||
1893 | { | ||
1894 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
1895 | struct vhost_scsi_tpg, se_tpg); | ||
1896 | unsigned long val; | ||
1897 | int ret = kstrtoul(page, 0, &val); | ||
1898 | |||
1899 | if (ret) { | ||
1900 | pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); | ||
1901 | return ret; | ||
1902 | } | ||
1903 | if (val != 0 && val != 1 && val != 3) { | ||
1904 | pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val); | ||
1905 | return -EINVAL; | ||
1906 | } | ||
1907 | tpg->tv_fabric_prot_type = val; | ||
1908 | |||
1909 | return count; | ||
1910 | } | ||
1911 | |||
1912 | static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type( | ||
1913 | struct se_portal_group *se_tpg, | ||
1914 | char *page) | ||
1915 | { | ||
1916 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, | ||
1917 | struct vhost_scsi_tpg, se_tpg); | ||
1918 | |||
1919 | return sprintf(page, "%d\n", tpg->tv_fabric_prot_type); | ||
1920 | } | ||
1921 | TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR); | ||
1922 | |||
1923 | static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = { | ||
1924 | &vhost_scsi_tpg_attrib_fabric_prot_type.attr, | ||
1925 | NULL, | ||
1926 | }; | ||
1927 | |||
1881 | static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, | 1928 | static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, |
1882 | const char *name) | 1929 | const char *name) |
1883 | { | 1930 | { |
@@ -2155,7 +2202,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, | |||
2155 | tpg->tport = tport; | 2202 | tpg->tport = tport; |
2156 | tpg->tport_tpgt = tpgt; | 2203 | tpg->tport_tpgt = tpgt; |
2157 | 2204 | ||
2158 | ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn, | 2205 | ret = core_tpg_register(&vhost_scsi_ops, wwn, |
2159 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | 2206 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); |
2160 | if (ret < 0) { | 2207 | if (ret < 0) { |
2161 | kfree(tpg); | 2208 | kfree(tpg); |
@@ -2277,6 +2324,8 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { | |||
2277 | }; | 2324 | }; |
2278 | 2325 | ||
2279 | static struct target_core_fabric_ops vhost_scsi_ops = { | 2326 | static struct target_core_fabric_ops vhost_scsi_ops = { |
2327 | .module = THIS_MODULE, | ||
2328 | .name = "vhost", | ||
2280 | .get_fabric_name = vhost_scsi_get_fabric_name, | 2329 | .get_fabric_name = vhost_scsi_get_fabric_name, |
2281 | .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, | 2330 | .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, |
2282 | .tpg_get_wwn = vhost_scsi_get_fabric_wwn, | 2331 | .tpg_get_wwn = vhost_scsi_get_fabric_wwn, |
@@ -2289,6 +2338,7 @@ static struct target_core_fabric_ops vhost_scsi_ops = { | |||
2289 | .tpg_check_demo_mode_cache = vhost_scsi_check_true, | 2338 | .tpg_check_demo_mode_cache = vhost_scsi_check_true, |
2290 | .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, | 2339 | .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, |
2291 | .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, | 2340 | .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, |
2341 | .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, | ||
2292 | .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, | 2342 | .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, |
2293 | .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, | 2343 | .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, |
2294 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, | 2344 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, |
@@ -2320,70 +2370,20 @@ static struct target_core_fabric_ops vhost_scsi_ops = { | |||
2320 | .fabric_drop_np = NULL, | 2370 | .fabric_drop_np = NULL, |
2321 | .fabric_make_nodeacl = vhost_scsi_make_nodeacl, | 2371 | .fabric_make_nodeacl = vhost_scsi_make_nodeacl, |
2322 | .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, | 2372 | .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, |
2373 | |||
2374 | .tfc_wwn_attrs = vhost_scsi_wwn_attrs, | ||
2375 | .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, | ||
2376 | .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, | ||
2323 | }; | 2377 | }; |
2324 | 2378 | ||
2325 | static int vhost_scsi_register_configfs(void) | 2379 | static int __init vhost_scsi_init(void) |
2326 | { | 2380 | { |
2327 | struct target_fabric_configfs *fabric; | 2381 | int ret = -ENOMEM; |
2328 | int ret; | ||
2329 | 2382 | ||
2330 | pr_debug("vhost-scsi fabric module %s on %s/%s" | 2383 | pr_debug("TCM_VHOST fabric module %s on %s/%s" |
2331 | " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, | 2384 | " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, |
2332 | utsname()->machine); | 2385 | utsname()->machine); |
2333 | /* | ||
2334 | * Register the top level struct config_item_type with TCM core | ||
2335 | */ | ||
2336 | fabric = target_fabric_configfs_init(THIS_MODULE, "vhost"); | ||
2337 | if (IS_ERR(fabric)) { | ||
2338 | pr_err("target_fabric_configfs_init() failed\n"); | ||
2339 | return PTR_ERR(fabric); | ||
2340 | } | ||
2341 | /* | ||
2342 | * Setup fabric->tf_ops from our local vhost_scsi_ops | ||
2343 | */ | ||
2344 | fabric->tf_ops = vhost_scsi_ops; | ||
2345 | /* | ||
2346 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2347 | */ | ||
2348 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs; | ||
2349 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs; | ||
2350 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
2351 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
2352 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
2353 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
2354 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
2355 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
2356 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
2357 | /* | ||
2358 | * Register the fabric for use within TCM | ||
2359 | */ | ||
2360 | ret = target_fabric_configfs_register(fabric); | ||
2361 | if (ret < 0) { | ||
2362 | pr_err("target_fabric_configfs_register() failed" | ||
2363 | " for TCM_VHOST\n"); | ||
2364 | return ret; | ||
2365 | } | ||
2366 | /* | ||
2367 | * Setup our local pointer to *fabric | ||
2368 | */ | ||
2369 | vhost_scsi_fabric_configfs = fabric; | ||
2370 | pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n"); | ||
2371 | return 0; | ||
2372 | }; | ||
2373 | |||
2374 | static void vhost_scsi_deregister_configfs(void) | ||
2375 | { | ||
2376 | if (!vhost_scsi_fabric_configfs) | ||
2377 | return; | ||
2378 | |||
2379 | target_fabric_configfs_deregister(vhost_scsi_fabric_configfs); | ||
2380 | vhost_scsi_fabric_configfs = NULL; | ||
2381 | pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n"); | ||
2382 | }; | ||
2383 | 2386 | ||
2384 | static int __init vhost_scsi_init(void) | ||
2385 | { | ||
2386 | int ret = -ENOMEM; | ||
2387 | /* | 2387 | /* |
2388 | * Use our own dedicated workqueue for submitting I/O into | 2388 | * Use our own dedicated workqueue for submitting I/O into |
2389 | * target core to avoid contention within system_wq. | 2389 | * target core to avoid contention within system_wq. |
@@ -2396,7 +2396,7 @@ static int __init vhost_scsi_init(void) | |||
2396 | if (ret < 0) | 2396 | if (ret < 0) |
2397 | goto out_destroy_workqueue; | 2397 | goto out_destroy_workqueue; |
2398 | 2398 | ||
2399 | ret = vhost_scsi_register_configfs(); | 2399 | ret = target_register_template(&vhost_scsi_ops); |
2400 | if (ret < 0) | 2400 | if (ret < 0) |
2401 | goto out_vhost_scsi_deregister; | 2401 | goto out_vhost_scsi_deregister; |
2402 | 2402 | ||
@@ -2412,7 +2412,7 @@ out: | |||
2412 | 2412 | ||
2413 | static void vhost_scsi_exit(void) | 2413 | static void vhost_scsi_exit(void) |
2414 | { | 2414 | { |
2415 | vhost_scsi_deregister_configfs(); | 2415 | target_unregister_template(&vhost_scsi_ops); |
2416 | vhost_scsi_deregister(); | 2416 | vhost_scsi_deregister(); |
2417 | destroy_workqueue(vhost_scsi_workqueue); | 2417 | destroy_workqueue(vhost_scsi_workqueue); |
2418 | }; | 2418 | }; |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 07ef38325223..b7f51504f85a 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -204,8 +204,7 @@ static LIST_HEAD(scsiback_free_pages); | |||
204 | static DEFINE_MUTEX(scsiback_mutex); | 204 | static DEFINE_MUTEX(scsiback_mutex); |
205 | static LIST_HEAD(scsiback_list); | 205 | static LIST_HEAD(scsiback_list); |
206 | 206 | ||
207 | /* Local pointer to allocated TCM configfs fabric module */ | 207 | static const struct target_core_fabric_ops scsiback_ops; |
208 | static struct target_fabric_configfs *scsiback_fabric_configfs; | ||
209 | 208 | ||
210 | static void scsiback_get(struct vscsibk_info *info) | 209 | static void scsiback_get(struct vscsibk_info *info) |
211 | { | 210 | { |
@@ -1902,7 +1901,7 @@ scsiback_make_tpg(struct se_wwn *wwn, | |||
1902 | tpg->tport = tport; | 1901 | tpg->tport = tport; |
1903 | tpg->tport_tpgt = tpgt; | 1902 | tpg->tport_tpgt = tpgt; |
1904 | 1903 | ||
1905 | ret = core_tpg_register(&scsiback_fabric_configfs->tf_ops, wwn, | 1904 | ret = core_tpg_register(&scsiback_ops, wwn, |
1906 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | 1905 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); |
1907 | if (ret < 0) { | 1906 | if (ret < 0) { |
1908 | kfree(tpg); | 1907 | kfree(tpg); |
@@ -1944,7 +1943,9 @@ static int scsiback_check_false(struct se_portal_group *se_tpg) | |||
1944 | return 0; | 1943 | return 0; |
1945 | } | 1944 | } |
1946 | 1945 | ||
1947 | static struct target_core_fabric_ops scsiback_ops = { | 1946 | static const struct target_core_fabric_ops scsiback_ops = { |
1947 | .module = THIS_MODULE, | ||
1948 | .name = "xen-pvscsi", | ||
1948 | .get_fabric_name = scsiback_get_fabric_name, | 1949 | .get_fabric_name = scsiback_get_fabric_name, |
1949 | .get_fabric_proto_ident = scsiback_get_fabric_proto_ident, | 1950 | .get_fabric_proto_ident = scsiback_get_fabric_proto_ident, |
1950 | .tpg_get_wwn = scsiback_get_fabric_wwn, | 1951 | .tpg_get_wwn = scsiback_get_fabric_wwn, |
@@ -1991,62 +1992,10 @@ static struct target_core_fabric_ops scsiback_ops = { | |||
1991 | .fabric_make_nodeacl = scsiback_make_nodeacl, | 1992 | .fabric_make_nodeacl = scsiback_make_nodeacl, |
1992 | .fabric_drop_nodeacl = scsiback_drop_nodeacl, | 1993 | .fabric_drop_nodeacl = scsiback_drop_nodeacl, |
1993 | #endif | 1994 | #endif |
1994 | }; | ||
1995 | |||
1996 | static int scsiback_register_configfs(void) | ||
1997 | { | ||
1998 | struct target_fabric_configfs *fabric; | ||
1999 | int ret; | ||
2000 | |||
2001 | pr_debug("fabric module %s on %s/%s on "UTS_RELEASE"\n", | ||
2002 | VSCSI_VERSION, utsname()->sysname, utsname()->machine); | ||
2003 | /* | ||
2004 | * Register the top level struct config_item_type with TCM core | ||
2005 | */ | ||
2006 | fabric = target_fabric_configfs_init(THIS_MODULE, "xen-pvscsi"); | ||
2007 | if (IS_ERR(fabric)) | ||
2008 | return PTR_ERR(fabric); | ||
2009 | 1995 | ||
2010 | /* | 1996 | .tfc_wwn_attrs = scsiback_wwn_attrs, |
2011 | * Setup fabric->tf_ops from our local scsiback_ops | 1997 | .tfc_tpg_base_attrs = scsiback_tpg_attrs, |
2012 | */ | 1998 | .tfc_tpg_param_attrs = scsiback_param_attrs, |
2013 | fabric->tf_ops = scsiback_ops; | ||
2014 | /* | ||
2015 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2016 | */ | ||
2017 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = scsiback_wwn_attrs; | ||
2018 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = scsiback_tpg_attrs; | ||
2019 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
2020 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = scsiback_param_attrs; | ||
2021 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
2022 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
2023 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
2024 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
2025 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
2026 | /* | ||
2027 | * Register the fabric for use within TCM | ||
2028 | */ | ||
2029 | ret = target_fabric_configfs_register(fabric); | ||
2030 | if (ret < 0) { | ||
2031 | target_fabric_configfs_free(fabric); | ||
2032 | return ret; | ||
2033 | } | ||
2034 | /* | ||
2035 | * Setup our local pointer to *fabric | ||
2036 | */ | ||
2037 | scsiback_fabric_configfs = fabric; | ||
2038 | pr_debug("Set fabric -> scsiback_fabric_configfs\n"); | ||
2039 | return 0; | ||
2040 | }; | ||
2041 | |||
2042 | static void scsiback_deregister_configfs(void) | ||
2043 | { | ||
2044 | if (!scsiback_fabric_configfs) | ||
2045 | return; | ||
2046 | |||
2047 | target_fabric_configfs_deregister(scsiback_fabric_configfs); | ||
2048 | scsiback_fabric_configfs = NULL; | ||
2049 | pr_debug("Cleared scsiback_fabric_configfs\n"); | ||
2050 | }; | 1999 | }; |
2051 | 2000 | ||
2052 | static const struct xenbus_device_id scsiback_ids[] = { | 2001 | static const struct xenbus_device_id scsiback_ids[] = { |
@@ -2078,6 +2027,9 @@ static int __init scsiback_init(void) | |||
2078 | if (!xen_domain()) | 2027 | if (!xen_domain()) |
2079 | return -ENODEV; | 2028 | return -ENODEV; |
2080 | 2029 | ||
2030 | pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n", | ||
2031 | VSCSI_VERSION, utsname()->sysname, utsname()->machine); | ||
2032 | |||
2081 | scsiback_cachep = kmem_cache_create("vscsiif_cache", | 2033 | scsiback_cachep = kmem_cache_create("vscsiif_cache", |
2082 | sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend); | 2034 | sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend); |
2083 | if (!scsiback_cachep) | 2035 | if (!scsiback_cachep) |
@@ -2087,7 +2039,7 @@ static int __init scsiback_init(void) | |||
2087 | if (ret) | 2039 | if (ret) |
2088 | goto out_cache_destroy; | 2040 | goto out_cache_destroy; |
2089 | 2041 | ||
2090 | ret = scsiback_register_configfs(); | 2042 | ret = target_register_template(&scsiback_ops); |
2091 | if (ret) | 2043 | if (ret) |
2092 | goto out_unregister_xenbus; | 2044 | goto out_unregister_xenbus; |
2093 | 2045 | ||
@@ -2110,7 +2062,7 @@ static void __exit scsiback_exit(void) | |||
2110 | BUG(); | 2062 | BUG(); |
2111 | gnttab_free_pages(1, &page); | 2063 | gnttab_free_pages(1, &page); |
2112 | } | 2064 | } |
2113 | scsiback_deregister_configfs(); | 2065 | target_unregister_template(&scsiback_ops); |
2114 | xenbus_unregister_driver(&scsiback_driver); | 2066 | xenbus_unregister_driver(&scsiback_driver); |
2115 | kmem_cache_destroy(scsiback_cachep); | 2067 | kmem_cache_destroy(scsiback_cachep); |
2116 | } | 2068 | } |
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index d3583d3ee193..54e7af301888 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #define ISCSIT_MIN_TAGS 16 | 20 | #define ISCSIT_MIN_TAGS 16 |
21 | #define ISCSIT_EXTRA_TAGS 8 | 21 | #define ISCSIT_EXTRA_TAGS 8 |
22 | #define ISCSIT_TCP_BACKLOG 256 | 22 | #define ISCSIT_TCP_BACKLOG 256 |
23 | #define ISCSI_RX_THREAD_NAME "iscsi_trx" | ||
24 | #define ISCSI_TX_THREAD_NAME "iscsi_ttx" | ||
23 | 25 | ||
24 | /* struct iscsi_node_attrib sanity values */ | 26 | /* struct iscsi_node_attrib sanity values */ |
25 | #define NA_DATAOUT_TIMEOUT 3 | 27 | #define NA_DATAOUT_TIMEOUT 3 |
@@ -60,6 +62,7 @@ | |||
60 | #define TA_CACHE_CORE_NPS 0 | 62 | #define TA_CACHE_CORE_NPS 0 |
61 | /* T10 protection information disabled by default */ | 63 | /* T10 protection information disabled by default */ |
62 | #define TA_DEFAULT_T10_PI 0 | 64 | #define TA_DEFAULT_T10_PI 0 |
65 | #define TA_DEFAULT_FABRIC_PROT_TYPE 0 | ||
63 | 66 | ||
64 | #define ISCSI_IOV_DATA_BUFFER 5 | 67 | #define ISCSI_IOV_DATA_BUFFER 5 |
65 | 68 | ||
@@ -600,8 +603,11 @@ struct iscsi_conn { | |||
600 | struct iscsi_tpg_np *tpg_np; | 603 | struct iscsi_tpg_np *tpg_np; |
601 | /* Pointer to parent session */ | 604 | /* Pointer to parent session */ |
602 | struct iscsi_session *sess; | 605 | struct iscsi_session *sess; |
603 | /* Pointer to thread_set in use for this conn's threads */ | 606 | int bitmap_id; |
604 | struct iscsi_thread_set *thread_set; | 607 | int rx_thread_active; |
608 | struct task_struct *rx_thread; | ||
609 | int tx_thread_active; | ||
610 | struct task_struct *tx_thread; | ||
605 | /* list_head for session connection list */ | 611 | /* list_head for session connection list */ |
606 | struct list_head conn_list; | 612 | struct list_head conn_list; |
607 | } ____cacheline_aligned; | 613 | } ____cacheline_aligned; |
@@ -767,6 +773,7 @@ struct iscsi_tpg_attrib { | |||
767 | u32 demo_mode_discovery; | 773 | u32 demo_mode_discovery; |
768 | u32 default_erl; | 774 | u32 default_erl; |
769 | u8 t10_pi; | 775 | u8 t10_pi; |
776 | u32 fabric_prot_type; | ||
770 | struct iscsi_portal_group *tpg; | 777 | struct iscsi_portal_group *tpg; |
771 | }; | 778 | }; |
772 | 779 | ||
@@ -871,10 +878,10 @@ struct iscsit_global { | |||
871 | /* Unique identifier used for the authentication daemon */ | 878 | /* Unique identifier used for the authentication daemon */ |
872 | u32 auth_id; | 879 | u32 auth_id; |
873 | u32 inactive_ts; | 880 | u32 inactive_ts; |
874 | /* Thread Set bitmap count */ | 881 | #define ISCSIT_BITMAP_BITS 262144 |
875 | int ts_bitmap_count; | ||
876 | /* Thread Set bitmap pointer */ | 882 | /* Thread Set bitmap pointer */ |
877 | unsigned long *ts_bitmap; | 883 | unsigned long *ts_bitmap; |
884 | spinlock_t ts_bitmap_lock; | ||
878 | /* Used for iSCSI discovery session authentication */ | 885 | /* Used for iSCSI discovery session authentication */ |
879 | struct iscsi_node_acl discovery_acl; | 886 | struct iscsi_node_acl discovery_acl; |
880 | struct iscsi_portal_group *discovery_tpg; | 887 | struct iscsi_portal_group *discovery_tpg; |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 672150b6aaf5..480e9f82dfea 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -165,10 +165,8 @@ enum se_cmd_flags_table { | |||
165 | SCF_SEND_DELAYED_TAS = 0x00004000, | 165 | SCF_SEND_DELAYED_TAS = 0x00004000, |
166 | SCF_ALUA_NON_OPTIMIZED = 0x00008000, | 166 | SCF_ALUA_NON_OPTIMIZED = 0x00008000, |
167 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, | 167 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, |
168 | SCF_ACK_KREF = 0x00040000, | ||
169 | SCF_COMPARE_AND_WRITE = 0x00080000, | 168 | SCF_COMPARE_AND_WRITE = 0x00080000, |
170 | SCF_COMPARE_AND_WRITE_POST = 0x00100000, | 169 | SCF_COMPARE_AND_WRITE_POST = 0x00100000, |
171 | SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000, | ||
172 | }; | 170 | }; |
173 | 171 | ||
174 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | 172 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
@@ -520,11 +518,11 @@ struct se_cmd { | |||
520 | struct list_head se_cmd_list; | 518 | struct list_head se_cmd_list; |
521 | struct completion cmd_wait_comp; | 519 | struct completion cmd_wait_comp; |
522 | struct kref cmd_kref; | 520 | struct kref cmd_kref; |
523 | struct target_core_fabric_ops *se_tfo; | 521 | const struct target_core_fabric_ops *se_tfo; |
524 | sense_reason_t (*execute_cmd)(struct se_cmd *); | 522 | sense_reason_t (*execute_cmd)(struct se_cmd *); |
525 | sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, | 523 | sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, |
526 | u32, enum dma_data_direction); | 524 | u32, enum dma_data_direction); |
527 | sense_reason_t (*transport_complete_callback)(struct se_cmd *); | 525 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); |
528 | 526 | ||
529 | unsigned char *t_task_cdb; | 527 | unsigned char *t_task_cdb; |
530 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; | 528 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; |
@@ -591,6 +589,7 @@ struct se_node_acl { | |||
591 | bool acl_stop:1; | 589 | bool acl_stop:1; |
592 | u32 queue_depth; | 590 | u32 queue_depth; |
593 | u32 acl_index; | 591 | u32 acl_index; |
592 | enum target_prot_type saved_prot_type; | ||
594 | #define MAX_ACL_TAG_SIZE 64 | 593 | #define MAX_ACL_TAG_SIZE 64 |
595 | char acl_tag[MAX_ACL_TAG_SIZE]; | 594 | char acl_tag[MAX_ACL_TAG_SIZE]; |
596 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 595 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
@@ -616,6 +615,7 @@ struct se_session { | |||
616 | unsigned sess_tearing_down:1; | 615 | unsigned sess_tearing_down:1; |
617 | u64 sess_bin_isid; | 616 | u64 sess_bin_isid; |
618 | enum target_prot_op sup_prot_ops; | 617 | enum target_prot_op sup_prot_ops; |
618 | enum target_prot_type sess_prot_type; | ||
619 | struct se_node_acl *se_node_acl; | 619 | struct se_node_acl *se_node_acl; |
620 | struct se_portal_group *se_tpg; | 620 | struct se_portal_group *se_tpg; |
621 | void *fabric_sess_ptr; | 621 | void *fabric_sess_ptr; |
@@ -890,7 +890,7 @@ struct se_portal_group { | |||
890 | /* List of TCM sessions associated wth this TPG */ | 890 | /* List of TCM sessions associated wth this TPG */ |
891 | struct list_head tpg_sess_list; | 891 | struct list_head tpg_sess_list; |
892 | /* Pointer to $FABRIC_MOD dependent code */ | 892 | /* Pointer to $FABRIC_MOD dependent code */ |
893 | struct target_core_fabric_ops *se_tpg_tfo; | 893 | const struct target_core_fabric_ops *se_tpg_tfo; |
894 | struct se_wwn *se_tpg_wwn; | 894 | struct se_wwn *se_tpg_wwn; |
895 | struct config_group tpg_group; | 895 | struct config_group tpg_group; |
896 | struct config_group *tpg_default_groups[7]; | 896 | struct config_group *tpg_default_groups[7]; |
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h index e0801386e4dc..25bb04c4209e 100644 --- a/include/target/target_core_configfs.h +++ b/include/target/target_core_configfs.h | |||
@@ -5,12 +5,6 @@ | |||
5 | #define TARGET_CORE_NAME_MAX_LEN 64 | 5 | #define TARGET_CORE_NAME_MAX_LEN 64 |
6 | #define TARGET_FABRIC_NAME_SIZE 32 | 6 | #define TARGET_FABRIC_NAME_SIZE 32 |
7 | 7 | ||
8 | extern struct target_fabric_configfs *target_fabric_configfs_init( | ||
9 | struct module *, const char *); | ||
10 | extern void target_fabric_configfs_free(struct target_fabric_configfs *); | ||
11 | extern int target_fabric_configfs_register(struct target_fabric_configfs *); | ||
12 | extern void target_fabric_configfs_deregister(struct target_fabric_configfs *); | ||
13 | |||
14 | struct target_fabric_configfs_template { | 8 | struct target_fabric_configfs_template { |
15 | struct config_item_type tfc_discovery_cit; | 9 | struct config_item_type tfc_discovery_cit; |
16 | struct config_item_type tfc_wwn_cit; | 10 | struct config_item_type tfc_wwn_cit; |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 22a4e98eec80..17c7f5ac7ea0 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #define TARGET_CORE_FABRIC_H | 2 | #define TARGET_CORE_FABRIC_H |
3 | 3 | ||
4 | struct target_core_fabric_ops { | 4 | struct target_core_fabric_ops { |
5 | struct module *module; | ||
6 | const char *name; | ||
5 | struct configfs_subsystem *tf_subsys; | 7 | struct configfs_subsystem *tf_subsys; |
6 | char *(*get_fabric_name)(void); | 8 | char *(*get_fabric_name)(void); |
7 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | 9 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); |
@@ -27,6 +29,14 @@ struct target_core_fabric_ops { | |||
27 | * inquiry response | 29 | * inquiry response |
28 | */ | 30 | */ |
29 | int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); | 31 | int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); |
32 | /* | ||
33 | * Optionally used as a configfs tunable to determine when | ||
34 | * target-core should signal the PROTECT=1 feature bit for | ||
35 | * backends that don't support T10-PI, so that either fabric | ||
36 | * HW offload or target-core emulation performs the associated | ||
37 | * WRITE_STRIP and READ_INSERT operations. | ||
38 | */ | ||
39 | int (*tpg_check_prot_fabric_only)(struct se_portal_group *); | ||
30 | struct se_node_acl *(*tpg_alloc_fabric_acl)( | 40 | struct se_node_acl *(*tpg_alloc_fabric_acl)( |
31 | struct se_portal_group *); | 41 | struct se_portal_group *); |
32 | void (*tpg_release_fabric_acl)(struct se_portal_group *, | 42 | void (*tpg_release_fabric_acl)(struct se_portal_group *, |
@@ -82,8 +92,23 @@ struct target_core_fabric_ops { | |||
82 | struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, | 92 | struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, |
83 | struct config_group *, const char *); | 93 | struct config_group *, const char *); |
84 | void (*fabric_drop_nodeacl)(struct se_node_acl *); | 94 | void (*fabric_drop_nodeacl)(struct se_node_acl *); |
95 | |||
96 | struct configfs_attribute **tfc_discovery_attrs; | ||
97 | struct configfs_attribute **tfc_wwn_attrs; | ||
98 | struct configfs_attribute **tfc_tpg_base_attrs; | ||
99 | struct configfs_attribute **tfc_tpg_np_base_attrs; | ||
100 | struct configfs_attribute **tfc_tpg_attrib_attrs; | ||
101 | struct configfs_attribute **tfc_tpg_auth_attrs; | ||
102 | struct configfs_attribute **tfc_tpg_param_attrs; | ||
103 | struct configfs_attribute **tfc_tpg_nacl_base_attrs; | ||
104 | struct configfs_attribute **tfc_tpg_nacl_attrib_attrs; | ||
105 | struct configfs_attribute **tfc_tpg_nacl_auth_attrs; | ||
106 | struct configfs_attribute **tfc_tpg_nacl_param_attrs; | ||
85 | }; | 107 | }; |
86 | 108 | ||
109 | int target_register_template(const struct target_core_fabric_ops *fo); | ||
110 | void target_unregister_template(const struct target_core_fabric_ops *fo); | ||
111 | |||
87 | struct se_session *transport_init_session(enum target_prot_op); | 112 | struct se_session *transport_init_session(enum target_prot_op); |
88 | int transport_alloc_session_tags(struct se_session *, unsigned int, | 113 | int transport_alloc_session_tags(struct se_session *, unsigned int, |
89 | unsigned int); | 114 | unsigned int); |
@@ -95,13 +120,15 @@ void transport_register_session(struct se_portal_group *, | |||
95 | struct se_node_acl *, struct se_session *, void *); | 120 | struct se_node_acl *, struct se_session *, void *); |
96 | void target_get_session(struct se_session *); | 121 | void target_get_session(struct se_session *); |
97 | void target_put_session(struct se_session *); | 122 | void target_put_session(struct se_session *); |
123 | ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *); | ||
98 | void transport_free_session(struct se_session *); | 124 | void transport_free_session(struct se_session *); |
99 | void target_put_nacl(struct se_node_acl *); | 125 | void target_put_nacl(struct se_node_acl *); |
100 | void transport_deregister_session_configfs(struct se_session *); | 126 | void transport_deregister_session_configfs(struct se_session *); |
101 | void transport_deregister_session(struct se_session *); | 127 | void transport_deregister_session(struct se_session *); |
102 | 128 | ||
103 | 129 | ||
104 | void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, | 130 | void transport_init_se_cmd(struct se_cmd *, |
131 | const struct target_core_fabric_ops *, | ||
105 | struct se_session *, u32, int, int, unsigned char *); | 132 | struct se_session *, u32, int, int, unsigned char *); |
106 | sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32); | 133 | sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32); |
107 | sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); | 134 | sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); |
@@ -153,8 +180,8 @@ int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, | |||
153 | unsigned char *, u32, int); | 180 | unsigned char *, u32, int); |
154 | int core_tpg_set_initiator_node_tag(struct se_portal_group *, | 181 | int core_tpg_set_initiator_node_tag(struct se_portal_group *, |
155 | struct se_node_acl *, const char *); | 182 | struct se_node_acl *, const char *); |
156 | int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *, | 183 | int core_tpg_register(const struct target_core_fabric_ops *, |
157 | struct se_portal_group *, void *, int); | 184 | struct se_wwn *, struct se_portal_group *, void *, int); |
158 | int core_tpg_deregister(struct se_portal_group *); | 185 | int core_tpg_deregister(struct se_portal_group *); |
159 | 186 | ||
160 | /* SAS helpers */ | 187 | /* SAS helpers */ |
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h index b32a14905cfa..7a0649c09e79 100644 --- a/include/target/target_core_fabric_configfs.h +++ b/include/target/target_core_fabric_configfs.h | |||
@@ -90,6 +90,11 @@ static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \ | |||
90 | _fabric##_tpg_store_##_name); | 90 | _fabric##_tpg_store_##_name); |
91 | 91 | ||
92 | 92 | ||
93 | #define TF_TPG_BASE_ATTR_RO(_fabric, _name) \ | ||
94 | static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \ | ||
95 | __CONFIGFS_EATTR_RO(_name, \ | ||
96 | _fabric##_tpg_show_##_name); | ||
97 | |||
93 | CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs); | 98 | CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs); |
94 | #define TF_WWN_ATTR(_fabric, _name, _mode) \ | 99 | #define TF_WWN_ATTR(_fabric, _name, _mode) \ |
95 | static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \ | 100 | static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \ |
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index b483d1909d3e..b67f99d3c520 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/uio.h> | 7 | #include <linux/uio.h> |
8 | 8 | ||
9 | #define TCMU_VERSION "1.0" | 9 | #define TCMU_VERSION "2.0" |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Ring Design | 12 | * Ring Design |
@@ -39,9 +39,13 @@ | |||
39 | * should process the next packet the same way, and so on. | 39 | * should process the next packet the same way, and so on. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | #define TCMU_MAILBOX_VERSION 1 | 42 | #define TCMU_MAILBOX_VERSION 2 |
43 | #define ALIGN_SIZE 64 /* Should be enough for most CPUs */ | 43 | #define ALIGN_SIZE 64 /* Should be enough for most CPUs */ |
44 | 44 | ||
45 | /* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */ | ||
46 | #define xstr(s) str(s) | ||
47 | #define str(s) #s | ||
48 | |||
45 | struct tcmu_mailbox { | 49 | struct tcmu_mailbox { |
46 | __u16 version; | 50 | __u16 version; |
47 | __u16 flags; | 51 | __u16 flags; |
@@ -64,31 +68,36 @@ enum tcmu_opcode { | |||
64 | * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode. | 68 | * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode. |
65 | */ | 69 | */ |
66 | struct tcmu_cmd_entry_hdr { | 70 | struct tcmu_cmd_entry_hdr { |
67 | __u32 len_op; | 71 | __u32 len_op; |
72 | __u16 cmd_id; | ||
73 | __u8 kflags; | ||
74 | #define TCMU_UFLAG_UNKNOWN_OP 0x1 | ||
75 | __u8 uflags; | ||
76 | |||
68 | } __packed; | 77 | } __packed; |
69 | 78 | ||
70 | #define TCMU_OP_MASK 0x7 | 79 | #define TCMU_OP_MASK 0x7 |
71 | 80 | ||
72 | static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr) | 81 | static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op) |
73 | { | 82 | { |
74 | return hdr->len_op & TCMU_OP_MASK; | 83 | return len_op & TCMU_OP_MASK; |
75 | } | 84 | } |
76 | 85 | ||
77 | static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op) | 86 | static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op) |
78 | { | 87 | { |
79 | hdr->len_op &= ~TCMU_OP_MASK; | 88 | *len_op &= ~TCMU_OP_MASK; |
80 | hdr->len_op |= (op & TCMU_OP_MASK); | 89 | *len_op |= (op & TCMU_OP_MASK); |
81 | } | 90 | } |
82 | 91 | ||
83 | static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr) | 92 | static inline __u32 tcmu_hdr_get_len(__u32 len_op) |
84 | { | 93 | { |
85 | return hdr->len_op & ~TCMU_OP_MASK; | 94 | return len_op & ~TCMU_OP_MASK; |
86 | } | 95 | } |
87 | 96 | ||
88 | static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len) | 97 | static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len) |
89 | { | 98 | { |
90 | hdr->len_op &= TCMU_OP_MASK; | 99 | *len_op &= TCMU_OP_MASK; |
91 | hdr->len_op |= len; | 100 | *len_op |= len; |
92 | } | 101 | } |
93 | 102 | ||
94 | /* Currently the same as SCSI_SENSE_BUFFERSIZE */ | 103 | /* Currently the same as SCSI_SENSE_BUFFERSIZE */ |
@@ -97,13 +106,14 @@ static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len) | |||
97 | struct tcmu_cmd_entry { | 106 | struct tcmu_cmd_entry { |
98 | struct tcmu_cmd_entry_hdr hdr; | 107 | struct tcmu_cmd_entry_hdr hdr; |
99 | 108 | ||
100 | uint16_t cmd_id; | ||
101 | uint16_t __pad1; | ||
102 | |||
103 | union { | 109 | union { |
104 | struct { | 110 | struct { |
111 | uint32_t iov_cnt; | ||
112 | uint32_t iov_bidi_cnt; | ||
113 | uint32_t iov_dif_cnt; | ||
105 | uint64_t cdb_off; | 114 | uint64_t cdb_off; |
106 | uint64_t iov_cnt; | 115 | uint64_t __pad1; |
116 | uint64_t __pad2; | ||
107 | struct iovec iov[0]; | 117 | struct iovec iov[0]; |
108 | } req; | 118 | } req; |
109 | struct { | 119 | struct { |